diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -4551,8 +4551,10 @@
     // doesn't happen.
     LHSPtrAdd->moveBefore(&MI);
     Register RHSReg = MI.getOffsetReg();
+    // Reusing the matched VReg could mismatch types if it came from an
+    auto NewCst = B.buildConstant(MRI.getType(RHSReg), LHSCstOff->Value);
     Observer.changingInstr(MI);
-    MI.getOperand(2).setReg(LHSCstOff->VReg);
+    MI.getOperand(2).setReg(NewCst.getReg(0));
     Observer.changedInstr(MI);
     Observer.changingInstr(*LHSPtrAdd);
     LHSPtrAdd->getOperand(2).setReg(RHSReg);
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-ptradd-chain.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-ptradd-chain.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-ptradd-chain.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-ptradd-chain.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple aarch64-apple-ios -run-pass=aarch64-prelegalizer-combiner --aarch64prelegalizercombinerhelper-only-enable-rule="ptr_add_immed_chain" %s -o - -verify-machineinstrs | FileCheck %s
+# RUN: llc -mtriple aarch64-apple-ios -run-pass=aarch64-prelegalizer-combiner %s -o - -verify-machineinstrs | FileCheck %s
 # REQUIRES: asserts
 
 # Check that we fold two adds of constant offsets with G_PTR_ADD into a single G_PTR_ADD.
@@ -105,3 +105,32 @@
     $x1 = COPY %ld_other(s64)
     RET_ReallyLR implicit $x0
 ...
+---
+# PR58906
+name: ptradd_constant_type_mismatch
+tracksRegLiveness: true
+body: |
+  bb.1:
+    liveins: $x0, $x1
+
+    ; CHECK-LABEL: name: ptradd_constant_type_mismatch
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[COPY1]](s64)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
+    ; CHECK-NEXT: %ld:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load (s64))
+    ; CHECK-NEXT: $x0 = COPY %ld(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %0:_(p0) = COPY $x0
+    %1:_(s64) = COPY $x1
+    %2:_(s32) = G_CONSTANT i32 8
+    %3:_(s64) = G_ZEXT %2:_(s32)
+    %4:_(p0) = G_PTR_ADD %0:_, %3:_(s64)
+    %5:_(p0) = G_PTR_ADD %4:_, %1:_(s64)
+    %ld:_(s64) = G_LOAD %5(p0) :: (load 8)
+    $x0 = COPY %ld(s64)
+    RET_ReallyLR implicit $x0
+...