Index: llvm/include/llvm/CodeGen/TargetLowering.h
===================================================================
--- llvm/include/llvm/CodeGen/TargetLowering.h
+++ llvm/include/llvm/CodeGen/TargetLowering.h
@@ -516,6 +516,12 @@
     return PredictableSelectIsExpensive;
   }
 
+  /// Return true if the target wants IRTranslator to bail out on the given
+  /// instruction and fall back to SelectionDAG instead of translating it.
+  virtual bool fallBackToDAGISel(const Instruction &Inst) const {
+    return false;
+  }
+
   /// If a branch or a select condition is skewed in one direction by more than
   /// this factor, it is very likely to be predicted correctly.
   virtual BranchProbability getPredictableBranchThreshold() const;
Index: llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2195,6 +2195,10 @@
   else
     EntryBuilder->setDebugLoc(DebugLoc());
 
+  auto &TLI = *MF->getSubtarget().getTargetLowering();
+  if (TLI.fallBackToDAGISel(Inst))
+    return false;
+
   switch (Inst.getOpcode()) {
 #define HANDLE_INST(NUM, OPCODE, CLASS) \
   case Instruction::OPCODE: \
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.h
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -702,6 +702,9 @@
                       bool isVarArg) const override;
   /// Used for exception handling on Win64.
   bool needsFixedCatchObjects() const override;
+
+  bool fallBackToDAGISel(const Instruction &Inst) const override;
+
 private:
   /// Keep a pointer to the AArch64Subtarget around so that we can
   /// make the right decision when generating code for different targets.
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -14591,3 +14591,16 @@
   }
   return TargetLoweringBase::shouldLocalize(MI, TTI);
 }
+
+bool AArch64TargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
+  // Fall back to SelectionDAG if the instruction defines or uses a scalable
+  // vector; GlobalISel cannot handle scalable vectors yet.
+  if (isa<ScalableVectorType>(Inst.getType()))
+    return true;
+
+  for (unsigned i = 0; i < Inst.getNumOperands(); ++i)
+    if (isa<ScalableVectorType>(Inst.getOperand(i)->getType()))
+      return true;
+
+  return false;
+}
Index: llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
+++ llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
@@ -438,6 +438,9 @@
   SmallVector<ArgInfo, 8> SplitArgs;
   unsigned i = 0;
   for (auto &Arg : F.args()) {
+    if (isa<ScalableVectorType>(Arg.getType()))
+      return false;
+
     if (DL.getTypeStoreSize(Arg.getType()).isZero())
       continue;
 
Index: llvm/test/CodeGen/AArch64/arm64-fallbacks.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/arm64-fallbacks.ll
@@ -0,0 +1,22 @@
+; RUN: llc -O0 -pass-remarks-missed=gisel -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
+; RUN: FileCheck --check-prefix=REMARKS %s < %t
+
+define <vscale x 16 x i8> @scalable_arg(<vscale x 16 x i1> %pred, i8* %addr) {
+; REMARKS: unable to lower arguments{{.*}}scalable_arg
+; CHECK-LABEL: scalable_arg:
+; CHECK: ld1b { z0.b }, p0/z, [x0]
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @scalable_call(i8* %addr) {
+; REMARKS: unable to translate instruction{{.*}}scalable_call
+; CHECK-LABEL: scalable_call:
+; CHECK: ld1b { z0.b }, p0/z, [x0]
+  %pred = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 0)
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr)
+  ret <vscale x 16 x i8> %res
+}
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 %pattern)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, i8*)
Index: llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll
@@ -1,4 +1,5 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+; RUN: llc -O0 -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
 
 ;
 ; LD1B
Index: llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll
@@ -1,4 +1,5 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+; RUN: llc -O0 -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
 
 ;
 ; ST1B
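
The new hook gives other targets the same escape hatch: when a fallBackToDAGISel override returns true, IRTranslator::translate fails for that instruction, which at -O0 emits the "unable to translate instruction" remark exercised above and re-selects the function with SelectionDAG. Below is a minimal sketch of the per-instruction test such an override might perform, mirroring the AArch64 check in this patch; usesScalableVectors is a hypothetical helper written as a free function for illustration, not part of the patch.

// Sketch only: a real target would place this logic inside its
// TargetLowering::fallBackToDAGISel override rather than a free function.
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instruction.h"

static bool usesScalableVectors(const llvm::Instruction &Inst) {
  // True if the instruction defines or uses a scalable vector, i.e. the
  // cases the AArch64 override above rejects.
  auto IsScalable = [](const llvm::Value *V) {
    return llvm::isa<llvm::ScalableVectorType>(V->getType());
  };
  return IsScalable(&Inst) || llvm::any_of(Inst.operands(), IsScalable);
}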