diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -1710,13 +1710,6 @@
   if (!MaybeImmVal)
     return false;
 
-  // Don't do this combine if there multiple uses of the first PTR_ADD,
-  // since we may be able to compute the second PTR_ADD as an immediate
-  // offset anyway. Folding the first offset into the second may cause us
-  // to go beyond the bounds of our legal addressing modes.
-  if (!MRI.hasOneNonDBGUse(Add2))
-    return false;
-
   MachineInstr *Add2Def = MRI.getUniqueVRegDef(Add2);
   if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
     return false;
@@ -1727,8 +1720,36 @@
   if (!MaybeImm2Val)
     return false;
 
+  // Check if the new combined immediate forms an illegal addressing mode.
+  // Do not combine if it was legal before but would become illegal.
+  // To do so, we need to find a load/store user of the pointer to get
+  // the access type.
+  Type *AccessTy = nullptr;
+  auto &MF = *MI.getMF();
+  for (auto &UseMI : MRI.use_nodbg_instructions(MI.getOperand(0).getReg())) {
+    if (auto *LdSt = dyn_cast<GLoadStore>(&UseMI)) {
+      AccessTy = getTypeForLLT(MRI.getType(LdSt->getReg(0)),
+                               MF.getFunction().getContext());
+      break;
+    }
+  }
+  TargetLoweringBase::AddrMode AMNew;
+  APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value;
+  AMNew.BaseOffs = CombinedImm.getSExtValue();
+  if (AccessTy) {
+    AMNew.HasBaseReg = true;
+    TargetLoweringBase::AddrMode AMOld;
+    AMOld.BaseOffs = MaybeImm2Val->Value.getSExtValue();
+    AMOld.HasBaseReg = true;
+    unsigned AS = MRI.getType(Add2).getAddressSpace();
+    const auto &TLI = *MF.getSubtarget().getTargetLowering();
+    if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) &&
+        !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS))
+      return false;
+  }
+
   // Pass the combined immediate to the apply function.
-  MatchInfo.Imm = (MaybeImmVal->Value + MaybeImm2Val->Value).getSExtValue();
+  MatchInfo.Imm = AMNew.BaseOffs;
   MatchInfo.Base = Base;
   return true;
 }
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-ptradd-reassociation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-ptradd-reassociation.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-ptradd-reassociation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-ptradd-reassociation.mir
@@ -13,7 +13,7 @@
     ; CHECK-LABEL: name: test1_noreassoc_legal_already_new_is_illegal
     ; CHECK: liveins: $x0
     ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4777
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1600
     ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
     ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
@@ -23,7 +23,7 @@
     ; CHECK: $w0 = COPY [[LOAD]](s32)
     ; CHECK: RET_ReallyLR implicit $w0
     %0:_(p0) = COPY $x0
-    %2:_(s64) = G_CONSTANT i64 4777
+    %2:_(s64) = G_CONSTANT i64 1600
     %4:_(s64) = G_CONSTANT i64 6
     %9:_(s32) = G_CONSTANT i32 0
     %10:_(p0) = G_PTR_ADD %0, %2(s64)
@@ -161,7 +161,7 @@
     ; CHECK-LABEL: name: walk_through_inttoptr
     ; CHECK: liveins: $x0
     ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4777
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1600
     ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 6
     ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
@@ -173,7 +173,7 @@
     ; CHECK: $w0 = COPY [[LOAD]](s32)
     ; CHECK: RET_ReallyLR implicit $w0
     %0:_(p0) = COPY $x0
-    %2:_(s64) = G_CONSTANT i64 4777
+    %2:_(s64) = G_CONSTANT i64 1600
     %4:_(s64) = G_CONSTANT i64 6
     %9:_(s32) = G_CONSTANT i32 0
     %10:_(p0) = G_PTR_ADD %0, %2(s64)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-ptradd-chain.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-ptradd-chain.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-ptradd-chain.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-ptradd-chain.mir
@@ -1,5 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple aarch64-apple-ios -run-pass=aarch64-prelegalizer-combiner %s -o - -verify-machineinstrs | FileCheck %s
+# RUN: llc -mtriple aarch64-apple-ios -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="ptr_add_immed_chain" %s -o - -verify-machineinstrs | FileCheck %s
+# REQUIRES: asserts
 
 # Check that we fold two adds of constant offsets with G_PTR_ADD into a single G_PTR_ADD.
 ---
@@ -70,3 +71,33 @@
     $x0 = COPY %5(p0)
     RET_ReallyLR implicit $x0
 ...
+---
+name: ptradd_would_form_illegal_load_addressing
+tracksRegLiveness: true
+body: |
+  bb.1:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: ptradd_would_form_illegal_load_addressing
+    ; CHECK: liveins: $x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4096
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C1]](s64)
+    ; CHECK: %ld:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load (s64))
+    ; CHECK: %ld_other:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64))
+    ; CHECK: $x0 = COPY %ld(s64)
+    ; CHECK: $x1 = COPY %ld_other(s64)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %0:_(p0) = COPY $x0
+    %1:_(s64) = G_CONSTANT i64 4
+    %2:_(s64) = G_CONSTANT i64 4096
+    %3:_(p0) = G_PTR_ADD %0(p0), %1
+    %4:_(p0) = G_PTR_ADD %3(p0), %2
+    %ld:_(s64) = G_LOAD %4(p0) :: (load 8)
+    %ld_other:_(s64) = G_LOAD %3(p0) :: (load 8)
+    $x0 = COPY %ld(s64)
+    $x1 = COPY %ld_other(s64)
+    RET_ReallyLR implicit $x0
+...
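
Note on the offset constants used in the tests above: they only make sense against AArch64's load/store immediate rules, which AArch64TargetLowering::isLegalAddressingMode implements. The following is an illustration-only sketch of those rules under simplifying assumptions, not the real hook; isLegalLoadStoreOffset is a hypothetical helper written for this note and is not an LLVM API.

// Illustration-only model of the AArch64 immediate-offset rule queried by
// the new isLegalAddressingMode check; the real logic lives in
// AArch64TargetLowering::isLegalAddressingMode. Hypothetical helper, not
// an LLVM API.
#include <cstdint>
#include <cstdio>

// LDR/STR accept an unsigned 12-bit immediate scaled by the access size,
// or an unscaled signed 9-bit immediate (LDUR/STUR form).
static bool isLegalLoadStoreOffset(int64_t Offs, int64_t AccessBytes) {
  bool ScaledUnsigned =
      Offs >= 0 && Offs <= 4095 * AccessBytes && Offs % AccessBytes == 0;
  bool UnscaledSigned = Offs >= -256 && Offs <= 255;
  return ScaledUnsigned || UnscaledSigned;
}

int main() {
  // combine-ptradd-reassociation.mir, s32 load: the existing offset is
  // legal, the combined 1600 + 6 = 1606 is not, so the combine must bail.
  std::printf("1600: %d, 1606: %d\n", isLegalLoadStoreOffset(1600, 4),
              isLegalLoadStoreOffset(1606, 4)); // prints 1, 0
  // prelegalizercombiner-ptradd-chain.mir, s64 load: 4096 is legal,
  // the combined 4 + 4096 = 4100 is not.
  std::printf("4096: %d, 4100: %d\n", isLegalLoadStoreOffset(4096, 8),
              isLegalLoadStoreOffset(4100, 8)); // prints 1, 0
}

Under this model, 1600 is a legal offset for an s32 load while 1606 is not, and 4096 is legal for an s64 load while 4100 is not. The old constant 4777 is illegal on its own (not 4-byte aligned), so once the one-use bail-out is removed it would no longer exercise the legal-before/illegal-after path, which is presumably why the reassociation tests switch to 1600.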