diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -5092,12 +5092,17 @@
                                                    SDValue &OffImm) {
   const EVT MemVT = getMemVTFromNode(*(CurDAG->getContext()), Root);
   const DataLayout &DL = CurDAG->getDataLayout();
+  const MachineFrameInfo &MFI = MF->getFrameInfo();
 
   if (N.getOpcode() == ISD::FrameIndex) {
     int FI = cast<FrameIndexSDNode>(N)->getIndex();
-    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
-    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
-    return true;
+    // We can only encode VL scaled offsets, so only fold in frame indexes
+    // referencing SVE objects.
+    if (FI == 0 || MFI.getStackID(FI) == TargetStackID::ScalableVector) {
+      Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
+      OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
+      return true;
+    }
   }
 
   if (MemVT == EVT())
@@ -5124,7 +5129,10 @@
     Base = N.getOperand(0);
     if (Base.getOpcode() == ISD::FrameIndex) {
       int FI = cast<FrameIndexSDNode>(Base)->getIndex();
-      Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
+      // We can only encode VL scaled offsets, so only fold in frame indexes
+      // referencing SVE objects.
+      if (FI == 0 || MFI.getStackID(FI) == TargetStackID::ScalableVector)
+        Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
     }
 
     OffImm = CurDAG->getTargetConstant(Offset, SDLoc(N), MVT::i64);
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests.ll
@@ -0,0 +1,25 @@
+; RUN: llc -debug < %s 2>&1 | FileCheck %s
+
+; REQUIRES: asserts
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; CHECK-LABEL: Instruction selection ends:
+
+; CHECK: t{{[0-9]+}}: ch = ST1D_IMM t{{[0-9]+}}, t{{[0-9]+}}, TargetFrameIndex:i64<0>, TargetConstant:i64<0>
+; CHECK: [[ADD:t[0-9]+]]: i64 = ADDXri TargetFrameIndex:i64<1>, TargetConstant:i32<0>, TargetConstant:i32<0>
+; CHECK: t{{[0-9]+}}: ch = ST1D_IMM t{{[0-9]+}}, t{{[0-9]+}}, [[ADD]], TargetConstant:i64<0>
+
+; Ensure that only no-offset frame indexes are folded into SVE load/stores
+; when accessing fixed-width objects.
+define void @foo(<8 x i64>* %a) #0 {
+entry:
+  %r0 = alloca <8 x i64>
+  %r1 = alloca <8 x i64>
+  %r = load volatile <8 x i64>, <8 x i64>* %a
+  store volatile <8 x i64> %r, <8 x i64>* %r0
+  store volatile <8 x i64> %r, <8 x i64>* %r1
+  ret void
+}
+
+attributes #0 = { nounwind "target-features"="+sve" vscale_range(4,4) }