diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -21940,8 +21940,10 @@
       RHSC = -(uint64_t)RHSC;
     if (!isInt<9>(RHSC))
       return false;
-    IsInc = (Op->getOpcode() == ISD::ADD);
-    Offset = Op->getOperand(1);
+    // Always emit pre-inc/post-inc addressing mode. Constant offset is already
+    // negated when dealing with subtraction.
+    IsInc = true;
+    Offset = DAG.getConstant(RHSC, SDLoc(N), RHS->getValueType(0));
     return true;
   }
   return false;
diff --git a/llvm/test/CodeGen/AArch64/pre-indexed-addrmode-with-constant-offset.ll b/llvm/test/CodeGen/AArch64/pre-indexed-addrmode-with-constant-offset.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/pre-indexed-addrmode-with-constant-offset.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=aarch64-linux-gnu < %s | FileCheck %s
+
+; Reduced test from https://github.com/llvm/llvm-project/issues/60645.
+; Check that we generate -32 as the offset for the first store.
+
+define i8* @pr60645(i8* %ptr, i64 %t0) {
+; CHECK-LABEL: pr60645:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub x8, x0, x1, lsl #2
+; CHECK-NEXT:    str wzr, [x8, #-32]!
+; CHECK-NEXT:    stur wzr, [x8, #-8]
+; CHECK-NEXT:    ret
+  %t1 = add nuw nsw i64 %t0, 8
+  %t2 = mul i64 %t1, -4
+  %t3 = getelementptr i8, i8* %ptr, i64 %t2
+  %t4 = bitcast i8* %t3 to i32*
+  store i32 0, i32* %t4, align 4
+  %t5 = shl i64 %t1, 2
+  %t6 = sub nuw nsw i64 -8, %t5
+  %t7 = getelementptr i8, i8* %ptr, i64 %t6
+  %t8 = bitcast i8* %t7 to i32*
+  store i32 0, i32* %t8, align 4
+  ret i8* %ptr
+}