diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td --- a/llvm/include/llvm/Target/GlobalISel/Combine.td +++ b/llvm/include/llvm/Target/GlobalISel/Combine.td @@ -235,6 +235,12 @@ [{ return Helper.matchOperandIsUndef(*${root}, 1); }]), (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>; +def binop_right_undef_to_undef: GICombineRule< + (defs root:$root), + (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR):$root, + [{ return Helper.matchOperandIsUndef(*${root}, 2); }]), + (apply [{ Helper.replaceInstWithUndef(*${root}); }])>; + // Instructions where if any source operand is undef, the instruction can be // replaced with undef. def propagate_undef_any_op: GICombineRule< @@ -889,6 +895,7 @@ def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero, undef_to_negative_one, binop_left_undef_to_zero, + binop_right_undef_to_undef, propagate_undef_any_op, propagate_undef_all_ops, propagate_undef_shuffle_mask, diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-undef.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-undef.mir --- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-undef.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-undef.mir @@ -226,3 +226,96 @@ %0:_(<2 x s32>) = G_SHUFFLE_VECTOR %1(<2 x s32>), %2(<2 x s32>), shufflemask(0, 1) $d0 = COPY %0(<2 x s32>) RET_ReallyLR implicit $d0 + +... +--- +name: shl_undef_rhs +alignment: 4 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: shl_undef_rhs + ; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF + ; CHECK-NEXT: $x0 = COPY [[DEF]](s64) + ; CHECK-NEXT: RET_ReallyLR implicit $x0 + %0:_(s64) = G_CONSTANT i64 10 + %1:_(s64) = G_IMPLICIT_DEF + %2:_(s64) = G_SHL %0, %1 + $x0 = COPY %2(s64) + RET_ReallyLR implicit $x0 + +... +--- +name: lshr_undef_rhs +alignment: 4 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: lshr_undef_rhs + ; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF + ; CHECK-NEXT: $x0 = COPY [[DEF]](s64) + ; CHECK-NEXT: RET_ReallyLR implicit $x0 + %0:_(s64) = G_CONSTANT i64 10 + %1:_(s64) = G_IMPLICIT_DEF + %2:_(s64) = G_LSHR %0, %1 + $x0 = COPY %2(s64) + RET_ReallyLR implicit $x0 + +... +--- +name: ashr_undef_rhs +alignment: 4 +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: ashr_undef_rhs + ; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF + ; CHECK-NEXT: $x0 = COPY [[DEF]](s64) + ; CHECK-NEXT: RET_ReallyLR implicit $x0 + %0:_(s64) = G_CONSTANT i64 10 + %1:_(s64) = G_IMPLICIT_DEF + %2:_(s64) = G_ASHR %0, %1 + $x0 = COPY %2(s64) + RET_ReallyLR implicit $x0 + +... +--- +name: lshr_undef_lhs +alignment: 4 +tracksRegLiveness: true +body: | + bb.0: + ; Optimize these to zero? + ; CHECK-LABEL: name: lshr_undef_lhs + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 10 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF + ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[DEF]], [[C]](s64) + ; CHECK-NEXT: $x0 = COPY [[LSHR]](s64) + ; CHECK-NEXT: RET_ReallyLR implicit $x0 + %0:_(s64) = G_CONSTANT i64 10 + %1:_(s64) = G_IMPLICIT_DEF + %2:_(s64) = G_LSHR %1, %0 + $x0 = COPY %2(s64) + RET_ReallyLR implicit $x0 + +... +--- +name: ashr_undef_lhs +alignment: 4 +tracksRegLiveness: true +body: | + bb.0: + ; Optimize these to zero? 
+ ; CHECK-LABEL: name: ashr_undef_lhs + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 10 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF + ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[DEF]], [[C]](s64) + ; CHECK-NEXT: $x0 = COPY [[ASHR]](s64) + ; CHECK-NEXT: RET_ReallyLR implicit $x0 + %0:_(s64) = G_CONSTANT i64 10 + %1:_(s64) = G_IMPLICIT_DEF + %2:_(s64) = G_ASHR %1, %0 + $x0 = COPY %2(s64) + RET_ReallyLR implicit $x0 + +...
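
Note (editorial sketch, not part of the patch): as a rough illustration of what the new binop_right_undef_to_undef rule asks the combiner to do, the match/apply pair above corresponds to roughly the following use of the existing CombinerHelper API. This is a hand-written sketch, not the code TableGen generates, and the helper name tryBinopRightUndefToUndef is made up for illustration.

// Sketch only: mirrors the .td rule's match/apply using CombinerHelper calls
// that already exist (matchOperandIsUndef, replaceInstWithUndef).
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetOpcodes.h"

using namespace llvm;

static bool tryBinopRightUndefToUndef(CombinerHelper &Helper,
                                      MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    // Operand 0 is the destination, operand 1 the shifted value, operand 2 the
    // shift amount. An undef amount may be >= the bit width, where the shift's
    // result is undefined, so the whole instruction can become G_IMPLICIT_DEF.
    if (Helper.matchOperandIsUndef(MI, 2))
      return Helper.replaceInstWithUndef(MI);
    return false;
  default:
    return false;
  }
}

This is the right-operand counterpart of the existing binop_left_undef_to_zero rule, which folds G_SHL of an undef value to zero; the lshr_undef_lhs and ashr_undef_lhs tests above record that the undef-LHS cases for G_LSHR/G_ASHR are intentionally left uncombined for now.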