diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -516,6 +516,10 @@ return PredictableSelectIsExpensive; } + virtual bool fallBackToDAGISel(const Instruction &Inst) const { + return false; + } + /// If a branch or a select condition is skewed in one direction by more than /// this factor, it is very likely to be predicted correctly. virtual BranchProbability getPredictableBranchThreshold() const; diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -2195,6 +2195,10 @@ else EntryBuilder->setDebugLoc(DebugLoc()); + auto &TLI = *MF->getSubtarget().getTargetLowering(); + if (TLI.fallBackToDAGISel(Inst)) + return false; + switch (Inst.getOpcode()) { #define HANDLE_INST(NUM, OPCODE, CLASS) \ case Instruction::OPCODE: \ diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -702,6 +702,9 @@ bool isVarArg) const override; /// Used for exception handling on Win64. bool needsFixedCatchObjects() const override; + + bool fallBackToDAGISel(const Instruction &Inst) const override; + private: /// Keep a pointer to the AArch64Subtarget around so that we can /// make the right decision when generating code for different targets. 
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -14642,3 +14642,14 @@ } return TargetLoweringBase::shouldLocalize(MI, TTI); } + +bool AArch64TargetLowering::fallBackToDAGISel(const Instruction &Inst) const { + if (isa<ScalableVectorType>(Inst.getType())) + return true; + + for (unsigned i = 0; i < Inst.getNumOperands(); ++i) + if (isa<ScalableVectorType>(Inst.getOperand(i)->getType())) + return true; + + return false; +} diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp --- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp @@ -438,6 +438,9 @@ SmallVector<ArgInfo, 8> SplitArgs; unsigned i = 0; for (auto &Arg : F.args()) { + if (isa<ScalableVectorType>(Arg.getType())) + return false; + if (DL.getTypeStoreSize(Arg.getType()).isZero()) continue; diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll --- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll @@ -219,3 +219,23 @@ tail call void asm sideeffect "", "imr,imr,~{memory}"(i32 %x, i32 %y) ret void } + +; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to lower arguments{{.*}}scalable_arg +; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_arg +define <vscale x 16 x i8> @scalable_arg(<vscale x 16 x i1> %pred, i8* %addr) #1 { + %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr) + ret <vscale x 16 x i8> %res +} + +; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction{{.*}}scalable_call +; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_call +define <vscale x 16 x i8> @scalable_call(i8* %addr) #1 { + %pred = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 0) + %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr) + ret <vscale x 16 x i8> %res +} + +attributes #1 = { "target-features"="+sve" } + +declare <vscale x 16 x i1> 
@llvm.aarch64.sve.ptrue.nxv16i1(i32 %pattern) +declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, i8*) diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll --- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll +++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll @@ -1,4 +1,5 @@ ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s +; RUN: llc -O0 -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s ; ; LD1B diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll --- a/llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll +++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll @@ -1,4 +1,5 @@ ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s +; RUN: llc -O0 -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s ; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t ; WARN-NOT: warning