diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -2340,9 +2340,12 @@
 bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) {
   if (!MOP.isReg())
     return false;
-  // MIPatternMatch doesn't let us look through G_ZEXT etc.
-  auto ValAndVReg = getIConstantVRegValWithLookThrough(MOP.getReg(), MRI);
-  return ValAndVReg && ValAndVReg->Value == C;
+  auto *MI = MRI.getVRegDef(MOP.getReg());
+  auto MaybeCst = isConstantOrConstantSplatVector(*MI, MRI);
+  // Compare via getSExtValue(): APInt::operator==(uint64_t) zero-extends, so a
+  // narrow splat of -1 (e.g. <4 x s32> -1) would not match C == -1. Guard the
+  // bit width so getSExtValue() cannot assert on constants wider than 64 bits.
+  return MaybeCst && MaybeCst->getBitWidth() <= 64 &&
+         MaybeCst->getSExtValue() == C;
 }
 
 bool CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-mul.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-mul.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-mul.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-mul.mir
@@ -13,8 +13,9 @@
     liveins: $x0
     ; CHECK-LABEL: name: mul_by_zero
     ; CHECK: liveins: $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: $x0 = COPY [[C]](s64)
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: $x0 = COPY [[C]](s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 0
     %2:_(s64) = G_MUL %0, %1(s64)
@@ -30,14 +31,12 @@
 body: |
   bb.0:
     liveins: $q0
-    ; Currently not implemented.
; CHECK-LABEL: name: mul_vector_by_zero ; CHECK: liveins: $q0 - ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0 - ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32) - ; CHECK: [[MUL:%[0-9]+]]:_(<4 x s32>) = G_MUL [[COPY]], [[BUILD_VECTOR]] - ; CHECK: $q0 = COPY [[MUL]](<4 x s32>) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32) + ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<4 x s32>) %0:_(<4 x s32>) = COPY $q0 %1:_(s32) = G_CONSTANT i32 0 %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32) @@ -56,8 +55,9 @@ liveins: $x0 ; CHECK-LABEL: name: mul_by_one ; CHECK: liveins: $x0 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 - ; CHECK: $x0 = COPY [[COPY]](s64) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK-NEXT: $x0 = COPY [[COPY]](s64) %0:_(s64) = COPY $x0 %1:_(s64) = G_CONSTANT i64 1 %2:_(s64) = G_MUL %0, %1(s64) @@ -73,14 +73,11 @@ body: | bb.0: liveins: $q0 - ; Currently not implemented. 
; CHECK-LABEL: name: mul_vector_by_one ; CHECK: liveins: $q0 - ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0 - ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32) - ; CHECK: [[MUL:%[0-9]+]]:_(<4 x s32>) = G_MUL [[COPY]], [[BUILD_VECTOR]] - ; CHECK: $q0 = COPY [[MUL]](<4 x s32>) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0 + ; CHECK-NEXT: $q0 = COPY [[COPY]](<4 x s32>) %0:_(<4 x s32>) = COPY $q0 %1:_(s32) = G_CONSTANT i32 1 %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32) @@ -99,10 +96,11 @@ liveins: $x0 ; CHECK-LABEL: name: mul_by_neg_one ; CHECK: liveins: $x0 - ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 - ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; CHECK: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY]] - ; CHECK: $x0 = COPY [[SUB]](s64) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 + ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY]] + ; CHECK-NEXT: $x0 = COPY [[SUB]](s64) %0:_(s64) = COPY $x0 %1:_(s64) = G_CONSTANT i64 -1 %2:_(s64) = G_MUL %0, %1(s64) @@ -118,14 +116,14 @@ body: | bb.0: liveins: $q0 - ; Currently not implemented. 
; CHECK-LABEL: name: mul_vector_by_neg_one ; CHECK: liveins: $q0 - ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0 - ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 - ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32) - ; CHECK: [[MUL:%[0-9]+]]:_(<4 x s32>) = G_MUL [[COPY]], [[BUILD_VECTOR]] - ; CHECK: $q0 = COPY [[MUL]](<4 x s32>) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0 + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32) + ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<4 x s32>) = G_SUB [[BUILD_VECTOR]], [[COPY]] + ; CHECK-NEXT: $q0 = COPY [[SUB]](<4 x s32>) %0:_(<4 x s32>) = COPY $q0 %1:_(s32) = G_CONSTANT i32 -1 %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32) diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-trivial-arith.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-trivial-arith.mir --- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-trivial-arith.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-trivial-arith.mir @@ -30,9 +30,10 @@ ; ; CHECK-LABEL: name: right_ident_add ; CHECK: liveins: $w0 - ; CHECK: %x:_(s32) = COPY $w0 - ; CHECK: $w0 = COPY %x(s32) - ; CHECK: RET_ReallyLR implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %x:_(s32) = COPY $w0 + ; CHECK-NEXT: $w0 = COPY %x(s32) + ; CHECK-NEXT: RET_ReallyLR implicit $w0 %x:_(s32) = COPY $w0 %cst:_(s32) = G_CONSTANT i32 0 %op:_(s32) = G_ADD %x(s32), %cst @@ -50,9 +51,10 @@ ; ; CHECK-LABEL: name: mul_0 ; CHECK: liveins: $w0 - ; CHECK: %cst:_(s32) = G_CONSTANT i32 0 - ; CHECK: $w0 = COPY %cst(s32) - ; CHECK: RET_ReallyLR implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %cst:_(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: $w0 = COPY %cst(s32) + ; CHECK-NEXT: RET_ReallyLR implicit $w0 %x:_(s32) = COPY $w0 %cst:_(s32) = 
G_CONSTANT i32 0 %op:_(s32) = G_MUL %x(s32), %cst @@ -72,11 +74,12 @@ ; ; CHECK-LABEL: name: mul_0_cant_replace ; CHECK: liveins: $w0 - ; CHECK: %x:_(s32) = COPY $w0 - ; CHECK: %cst:_(s32) = G_CONSTANT i32 0 - ; CHECK: %op:gpr(s32) = G_MUL %x, %cst - ; CHECK: $w0 = COPY %op(s32) - ; CHECK: RET_ReallyLR implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %x:_(s32) = COPY $w0 + ; CHECK-NEXT: %cst:_(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: %op:gpr(s32) = G_MUL %x, %cst + ; CHECK-NEXT: $w0 = COPY %op(s32) + ; CHECK-NEXT: RET_ReallyLR implicit $w0 %x:_(s32) = COPY $w0 %cst:_(s32) = G_CONSTANT i32 0 %op:gpr(s32) = G_MUL %x(s32), %cst @@ -95,9 +98,10 @@ ; ; CHECK-LABEL: name: sdiv_0 ; CHECK: liveins: $w0 - ; CHECK: %cst:_(s32) = G_CONSTANT i32 0 - ; CHECK: $w0 = COPY %cst(s32) - ; CHECK: RET_ReallyLR implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %cst:_(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: $w0 = COPY %cst(s32) + ; CHECK-NEXT: RET_ReallyLR implicit $w0 %x:_(s32) = COPY $w0 %cst:_(s32) = G_CONSTANT i32 0 %op:_(s32) = G_SDIV %cst, %x @@ -115,9 +119,10 @@ ; ; CHECK-LABEL: name: udiv_0 ; CHECK: liveins: $w0 - ; CHECK: %cst:_(s32) = G_CONSTANT i32 0 - ; CHECK: $w0 = COPY %cst(s32) - ; CHECK: RET_ReallyLR implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %cst:_(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: $w0 = COPY %cst(s32) + ; CHECK-NEXT: RET_ReallyLR implicit $w0 %x:_(s32) = COPY $w0 %cst:_(s32) = G_CONSTANT i32 0 %op:_(s32) = G_UDIV %cst, %x @@ -135,9 +140,10 @@ ; ; CHECK-LABEL: name: srem_0 ; CHECK: liveins: $w0 - ; CHECK: %cst:_(s32) = G_CONSTANT i32 0 - ; CHECK: $w0 = COPY %cst(s32) - ; CHECK: RET_ReallyLR implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %cst:_(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: $w0 = COPY %cst(s32) + ; CHECK-NEXT: RET_ReallyLR implicit $w0 %x:_(s32) = COPY $w0 %cst:_(s32) = G_CONSTANT i32 0 %op:_(s32) = G_SREM %cst, %x @@ -155,9 +161,10 @@ ; ; CHECK-LABEL: name: urem_0 ; CHECK: liveins: $w0 - ; CHECK: %cst:_(s32) = G_CONSTANT i32 0 - ; 
CHECK: $w0 = COPY %cst(s32) - ; CHECK: RET_ReallyLR implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %cst:_(s32) = G_CONSTANT i32 0 + ; CHECK-NEXT: $w0 = COPY %cst(s32) + ; CHECK-NEXT: RET_ReallyLR implicit $w0 %x:_(s32) = COPY $w0 %cst:_(s32) = G_CONSTANT i32 0 %op:_(s32) = G_UREM %cst, %x @@ -176,9 +183,10 @@ ; ; CHECK-LABEL: name: right_ident_or ; CHECK: liveins: $w0 - ; CHECK: %x:_(s32) = COPY $w0 - ; CHECK: $w0 = COPY %x(s32) - ; CHECK: RET_ReallyLR implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %x:_(s32) = COPY $w0 + ; CHECK-NEXT: $w0 = COPY %x(s32) + ; CHECK-NEXT: RET_ReallyLR implicit $w0 %x:_(s32) = COPY $w0 %cst:_(s32) = G_CONSTANT i32 0 %op:_(s32) = G_OR %x(s32), %cst @@ -196,9 +204,10 @@ ; ; CHECK-LABEL: name: right_ident_xor ; CHECK: liveins: $w0 - ; CHECK: %x:_(s32) = COPY $w0 - ; CHECK: $w0 = COPY %x(s32) - ; CHECK: RET_ReallyLR implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %x:_(s32) = COPY $w0 + ; CHECK-NEXT: $w0 = COPY %x(s32) + ; CHECK-NEXT: RET_ReallyLR implicit $w0 %x:_(s32) = COPY $w0 %cst:_(s32) = G_CONSTANT i32 0 %op:_(s32) = G_XOR %x(s32), %cst @@ -216,9 +225,10 @@ ; ; CHECK-LABEL: name: right_ident_shl ; CHECK: liveins: $w0 - ; CHECK: %x:_(s32) = COPY $w0 - ; CHECK: $w0 = COPY %x(s32) - ; CHECK: RET_ReallyLR implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %x:_(s32) = COPY $w0 + ; CHECK-NEXT: $w0 = COPY %x(s32) + ; CHECK-NEXT: RET_ReallyLR implicit $w0 %x:_(s32) = COPY $w0 %cst:_(s32) = G_CONSTANT i32 0 %op:_(s32) = G_SHL %x(s32), %cst @@ -236,9 +246,10 @@ ; ; CHECK-LABEL: name: right_ident_ashr ; CHECK: liveins: $w0 - ; CHECK: %x:_(s32) = COPY $w0 - ; CHECK: $w0 = COPY %x(s32) - ; CHECK: RET_ReallyLR implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %x:_(s32) = COPY $w0 + ; CHECK-NEXT: $w0 = COPY %x(s32) + ; CHECK-NEXT: RET_ReallyLR implicit $w0 %x:_(s32) = COPY $w0 %cst:_(s32) = G_CONSTANT i32 0 %op:_(s32) = G_ASHR %x(s32), %cst @@ -256,9 +267,10 @@ ; ; CHECK-LABEL: name: right_ident_lshr ; CHECK: liveins: $w0 - ; 
CHECK: %x:_(s32) = COPY $w0 - ; CHECK: $w0 = COPY %x(s32) - ; CHECK: RET_ReallyLR implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %x:_(s32) = COPY $w0 + ; CHECK-NEXT: $w0 = COPY %x(s32) + ; CHECK-NEXT: RET_ReallyLR implicit $w0 %x:_(s32) = COPY $w0 %cst:_(s32) = G_CONSTANT i32 0 %op:_(s32) = G_LSHR %x(s32), %cst @@ -276,11 +288,12 @@ ; ; CHECK-LABEL: name: dont_fold_sub ; CHECK: liveins: $w0 - ; CHECK: %x:_(s32) = COPY $w0 - ; CHECK: %cst:_(s32) = G_CONSTANT i32 1 - ; CHECK: %op:_(s32) = G_SUB %x, %cst - ; CHECK: $w0 = COPY %op(s32) - ; CHECK: RET_ReallyLR implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %x:_(s32) = COPY $w0 + ; CHECK-NEXT: %cst:_(s32) = G_CONSTANT i32 1 + ; CHECK-NEXT: %op:_(s32) = G_SUB %x, %cst + ; CHECK-NEXT: $w0 = COPY %op(s32) + ; CHECK-NEXT: RET_ReallyLR implicit $w0 %x:_(s32) = COPY $w0 %cst:_(s32) = G_CONSTANT i32 1 %op:_(s32) = G_SUB %x(s32), %cst @@ -296,10 +309,11 @@ liveins: $x0 ; CHECK-LABEL: name: look_through_zext ; CHECK: liveins: $x0 - ; CHECK: %zero:_(s8) = G_CONSTANT i8 0 - ; CHECK: %zext_zero:_(s64) = G_ZEXT %zero(s8) - ; CHECK: $x0 = COPY %zext_zero(s64) - ; CHECK: RET_ReallyLR implicit $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %zero:_(s8) = G_CONSTANT i8 0 + ; CHECK-NEXT: %zext_zero:_(s64) = G_ZEXT %zero(s8) + ; CHECK-NEXT: $x0 = COPY %zext_zero(s64) + ; CHECK-NEXT: RET_ReallyLR implicit $x0 %zero:_(s8) = G_CONSTANT i8 0 %zext_zero:_(s64) = G_ZEXT %zero(s8) %c:_(s64) = G_CONSTANT i64 72340172838076673 @@ -317,9 +331,10 @@ ; ; CHECK-LABEL: name: right_ident_ptr_add ; CHECK: liveins: $x0 - ; CHECK: %x:_(p0) = COPY $x0 - ; CHECK: $x0 = COPY %x(p0) - ; CHECK: RET_ReallyLR implicit $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %x:_(p0) = COPY $x0 + ; CHECK-NEXT: $x0 = COPY %x(p0) + ; CHECK-NEXT: RET_ReallyLR implicit $x0 %x:_(p0) = COPY $x0 %cst:_(s64) = G_CONSTANT i64 0 %op:_(p0) = G_PTR_ADD %x(p0), %cst @@ -334,9 +349,10 @@ liveins: $w0, $w1 ; CHECK-LABEL: name: right_identity_rotl ; CHECK: liveins: $w0, $w1 - ; CHECK: 
%copy:_(s32) = COPY $w0 - ; CHECK: $w0 = COPY %copy(s32) - ; CHECK: RET_ReallyLR implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %copy:_(s32) = COPY $w0 + ; CHECK-NEXT: $w0 = COPY %copy(s32) + ; CHECK-NEXT: RET_ReallyLR implicit $w0 %copy:_(s32) = COPY $w0 %zero:_(s32) = G_CONSTANT i32 0 %rot:_(s32) = G_ROTL %copy(s32), %zero(s32) @@ -352,12 +368,45 @@ liveins: $w0, $w1 ; CHECK-LABEL: name: right_identity_rotr ; CHECK: liveins: $w0, $w1 - ; CHECK: %copy:_(s32) = COPY $w0 - ; CHECK: $w0 = COPY %copy(s32) - ; CHECK: RET_ReallyLR implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: %copy:_(s32) = COPY $w0 + ; CHECK-NEXT: $w0 = COPY %copy(s32) + ; CHECK-NEXT: RET_ReallyLR implicit $w0 %copy:_(s32) = COPY $w0 %zero:_(s32) = G_CONSTANT i32 0 %rot:_(s32) = G_ROTR %copy(s32), %zero(s32) $w0 = COPY %rot(s32) RET_ReallyLR implicit $w0 ... +--- +name: lshr_of_vec_zero +body: | + bb.1: + liveins: $q0 + ; CHECK-LABEL: name: lshr_of_vec_zero + ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0 + ; CHECK-NEXT: $q0 = COPY [[COPY]](<8 x s16>) + ; CHECK-NEXT: RET_ReallyLR implicit $q0 + %0:_(<8 x s16>) = COPY $q0 + %5:_(s16) = G_CONSTANT i16 0 + %zero_vec:_(<8 x s16>) = G_BUILD_VECTOR %5(s16), %5(s16), %5(s16), %5(s16), %5(s16), %5(s16), %5(s16), %5(s16) + %shift:_(<8 x s16>) = G_LSHR %0, %zero_vec(<8 x s16>) + $q0 = COPY %shift(<8 x s16>) + RET_ReallyLR implicit $q0 +... +--- +name: ptradd_of_vec_zero +body: | + bb.1: + liveins: $q0 + ; CHECK-LABEL: name: ptradd_of_vec_zero + ; CHECK: [[COPY:%[0-9]+]]:_(<2 x p0>) = COPY $q0 + ; CHECK-NEXT: $q0 = COPY [[COPY]](<2 x p0>) + ; CHECK-NEXT: RET_ReallyLR implicit $q0 + %0:_(<2 x p0>) = COPY $q0 + %5:_(s64) = G_CONSTANT i64 0 + %zero_vec:_(<2 x s64>) = G_BUILD_VECTOR %5(s64), %5(s64) + %ptr:_(<2 x p0>) = G_PTR_ADD %0, %zero_vec(<2 x s64>) + $q0 = COPY %ptr(<2 x p0>) + RET_ReallyLR implicit $q0 +...