diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
--- a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -290,6 +290,8 @@
     return false;
   }
 
+  virtual bool fallBackToDAGISel(const Function &F) const { return false; }
+
   /// This hook must be implemented to lower the incoming (formal)
   /// arguments, described by \p VRegs, for GlobalISel. Each argument
   /// must end up in the related virtual registers described by \p VRegs.
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2384,6 +2384,14 @@
   // Make our arguments/constants entry block fallthrough to the IR entry block.
   EntryBB->addSuccessor(&getMBB(F.front()));
 
+  if (CLI->fallBackToDAGISel(F)) {
+    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
+                               F.getSubprogram(), &F.getEntryBlock());
+    R << "unable to lower function: " << ore::NV("Prototype", F.getType());
+    reportTranslationError(*MF, *TPC, *ORE, R);
+    return false;
+  }
+
   // Lower the actual args into this basic block.
   SmallVector<ArrayRef<Register>, 8> VRegArgs;
   for (const Argument &Arg: F.args()) {
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.h b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.h
--- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.h
+++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.h
@@ -37,6 +37,8 @@
                    ArrayRef<Register> VRegs,
                    Register SwiftErrorVReg) const override;
 
+  bool fallBackToDAGISel(const Function &F) const override;
+
   bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
                             ArrayRef<ArrayRef<Register>> VRegs) const override;
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
@@ -427,6 +427,14 @@
   }
 }
 
+bool AArch64CallLowering::fallBackToDAGISel(const Function &F) const {
+  if (isa<ScalableVectorType>(F.getReturnType()))
+    return true;
+  return llvm::any_of(F.args(), [](const Argument &A) {
+    return isa<ScalableVectorType>(A.getType());
+  });
+}
+
 bool AArch64CallLowering::lowerFormalArguments(
     MachineIRBuilder &MIRBuilder, const Function &F,
     ArrayRef<ArrayRef<Register>> VRegs) const {
@@ -438,9 +446,6 @@
   SmallVector<ArgInfo, 8> SplitArgs;
   unsigned i = 0;
   for (auto &Arg : F.args()) {
-    if (isa<ScalableVectorType>(Arg.getType()))
-      return false;
-
     if (DL.getTypeStoreSize(Arg.getType()).isZero())
       continue;
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -220,21 +220,30 @@
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to lower arguments{{.*}}scalable_arg
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to lower function{{.*}}scalable_arg
 ; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_arg
 define <vscale x 16 x i8> @scalable_arg(<vscale x 16 x i1> %pred, i8* %addr) #1 {
   %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr)
   ret <vscale x 16 x i8> %res
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction{{.*}}scalable_call
-; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_call
-define <vscale x 16 x i8> @scalable_call(i8* %addr) #1 {
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to lower function{{.*}}scalable_ret
+; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_ret
+define <vscale x 16 x i8> @scalable_ret(i8* %addr) #1 {
   %pred = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 0)
   %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr)
   ret <vscale x 16 x i8> %res
 }
 
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction{{.*}}scalable_call
+; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_call
+define i8 @scalable_call(i8* %addr) #1 {
+  %pred = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 0)
+  %vec = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr)
+  %res = extractelement <vscale x 16 x i8> %vec, i32 0
+  ret i8 %res
+}
+
 attributes #1 = { "target-features"="+sve" }
 
 declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 %pattern)