diff --git a/llvm/test/Transforms/InstCombine/sub-ashr-and-to-icmp-select.ll b/llvm/test/Transforms/InstCombine/sub-ashr-and-to-icmp-select.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/InstCombine/sub-ashr-and-to-icmp-select.ll @@ -0,0 +1,223 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -instcombine %s -S -o - | FileCheck %s + +; Clamp negative to zero: +; E.g., clamp0 implemented in a shifty way, could be optimized as v < 0 ? 0 : v, where sub hasNoSignedWrap. +; int32 clamp0(int32 v) { +; return ((-(v) >> 31) & (v)); +; } +; + +; Scalar Types + +define i8 @sub_ashr_and_i8(i8 %v, i8 %x) { +; CHECK-LABEL: @sub_ashr_and_i8( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i8 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i8 [[SUB]], 7 +; CHECK-NEXT: [[AND:%.*]] = and i8 [[SHR]], [[V]] +; CHECK-NEXT: ret i8 [[AND]] +; + %sub = sub nsw i8 %x, %v + %shr = ashr i8 %sub, 7 + %and = and i8 %shr, %v + ret i8 %and +} + +define i16 @sub_ashr_and_i16(i16 %v, i16 %x) { +; CHECK-LABEL: @sub_ashr_and_i16( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i16 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i16 [[SUB]], 15 +; CHECK-NEXT: [[AND:%.*]] = and i16 [[SHR]], [[V]] +; CHECK-NEXT: ret i16 [[AND]] +; + + %sub = sub nsw i16 %x, %v + %shr = ashr i16 %sub, 15 + %and = and i16 %shr, %v + ret i16 %and +} + +define i32 @sub_ashr_and_i32(i32 %v, i32 %x) { +; CHECK-LABEL: @sub_ashr_and_i32( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 31 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHR]], [[V]] +; CHECK-NEXT: ret i32 [[AND]] +; + %sub = sub nsw i32 %x, %v + %shr = ashr i32 %sub, 31 + %and = and i32 %shr, %v + ret i32 %and +} + +define i64 @sub_ashr_and_i64(i64 %v, i64 %x) { +; CHECK-LABEL: @sub_ashr_and_i64( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i64 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i64 [[SUB]], 63 +; CHECK-NEXT: [[AND:%.*]] = and i64 [[SHR]], [[V]] +; 
CHECK-NEXT: ret i64 [[AND]] +; + %sub = sub nsw i64 %x, %v + %shr = ashr i64 %sub, 63 + %and = and i64 %shr, %v + ret i64 %and +} + +; nuw nsw + +define i32 @sub_ashr_and_i32_nuw_nsw(i32 %v, i32 %x) { +; CHECK-LABEL: @sub_ashr_and_i32_nuw_nsw( +; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 31 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHR]], [[V]] +; CHECK-NEXT: ret i32 [[AND]] +; + %sub = sub nuw nsw i32 %x, %v + %shr = ashr i32 %sub, 31 + %and = and i32 %shr, %v + ret i32 %and +} + +; Commute + +define i32 @sub_ashr_and_i32_commute(i32 %v, i32 %x) { +; CHECK-LABEL: @sub_ashr_and_i32_commute( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 31 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHR]], [[V]] +; CHECK-NEXT: ret i32 [[AND]] +; + %sub = sub nsw i32 %x, %v + %shr = ashr i32 %sub, 31 + %and = and i32 %v, %shr ; commute %v and %shr + ret i32 %and +} + +; Vector Types + +define <4 x i32> @sub_ashr_and_i32_vec(<4 x i32> %v, <4 x i32> %x) { +; CHECK-LABEL: @sub_ashr_and_i32_vec( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw <4 x i32> [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr <4 x i32> [[SUB]], <i32 31, i32 31, i32 31, i32 31> +; CHECK-NEXT: [[AND:%.*]] = and <4 x i32> [[SHR]], [[V]] +; CHECK-NEXT: ret <4 x i32> [[AND]] +; + %sub = sub nsw <4 x i32> %x, %v + %shr = ashr <4 x i32> %sub, <i32 31, i32 31, i32 31, i32 31> + %and = and <4 x i32> %shr, %v + ret <4 x i32> %and +} + +define <4 x i32> @sub_ashr_and_i32_vec_nuw_nsw(<4 x i32> %v, <4 x i32> %x) { +; CHECK-LABEL: @sub_ashr_and_i32_vec_nuw_nsw( +; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw <4 x i32> [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr <4 x i32> [[SUB]], <i32 31, i32 31, i32 31, i32 31> +; CHECK-NEXT: [[AND:%.*]] = and <4 x i32> [[SHR]], [[V]] +; CHECK-NEXT: ret <4 x i32> [[AND]] +; + %sub = sub nuw nsw <4 x i32> %x, %v + %shr = ashr <4 x i32> %sub, <i32 31, i32 31, i32 31, i32 31> + %and = and <4 x i32> %shr, %v + ret <4 x i32> %and +} + +define <4 x i32> @sub_ashr_and_i32_vec_commute(<4 x i32> %v, <4 x i32> 
%x) { +; CHECK-LABEL: @sub_ashr_and_i32_vec_commute( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw <4 x i32> [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr <4 x i32> [[SUB]], <i32 31, i32 31, i32 31, i32 31> +; CHECK-NEXT: [[AND:%.*]] = and <4 x i32> [[SHR]], [[V]] +; CHECK-NEXT: ret <4 x i32> [[AND]] +; + %sub = sub nsw <4 x i32> %x, %v + %shr = ashr <4 x i32> %sub, <i32 31, i32 31, i32 31, i32 31> + %and = and <4 x i32> %v, %shr ; commute %v and %shr + ret <4 x i32> %and +} + +; Extra uses + +define i32 @sub_ashr_and_i32_extra_use_sub(i32 %v, i32 %x, i32* %p) { +; CHECK-LABEL: @sub_ashr_and_i32_extra_use_sub( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: store i32 [[SUB]], i32* [[P:%.*]], align 4 +; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 31 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHR]], [[V]] +; CHECK-NEXT: ret i32 [[AND]] +; + %sub = sub nsw i32 %x, %v + store i32 %sub, i32* %p + %shr = ashr i32 %sub, 31 + %and = and i32 %shr, %v + ret i32 %and +} + +define i32 @sub_ashr_and_i32_extra_use_and(i32 %v, i32 %x, i32* %p) { +; CHECK-LABEL: @sub_ashr_and_i32_extra_use_and( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 31 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHR]], [[V]] +; CHECK-NEXT: store i32 [[AND]], i32* [[P:%.*]], align 4 +; CHECK-NEXT: ret i32 [[AND]] +; + %sub = sub nsw i32 %x, %v + %shr = ashr i32 %sub, 31 + %and = and i32 %shr, %v + store i32 %and, i32* %p + ret i32 %and +} + +; Negative Tests + +define i32 @sub_ashr_and_i32_extra_use_ashr(i32 %v, i32 %x, i32* %p) { +; CHECK-LABEL: @sub_ashr_and_i32_extra_use_ashr( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 31 +; CHECK-NEXT: store i32 [[SHR]], i32* [[P:%.*]], align 4 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHR]], [[V]] +; CHECK-NEXT: ret i32 [[AND]] +; + %sub = sub nsw i32 %x, %v + %shr = ashr i32 %sub, 31 + store i32 %shr, i32* %p + %and = and i32 %shr, %v + ret i32 %and +} + +define i32 
@sub_ashr_and_i32_no_nuw_nsw(i32 %v, i32 %x) { +; CHECK-LABEL: @sub_ashr_and_i32_no_nuw_nsw( +; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 7 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHR]], [[V]] +; CHECK-NEXT: ret i32 [[AND]] +; + %sub = sub i32 %x, %v + %shr = ashr i32 %sub, 7 + %and = and i32 %shr, %v + ret i32 %and +} + +define <4 x i32> @sub_ashr_and_i32_vec_undef(<4 x i32> %v, <4 x i32> %x) { +; CHECK-LABEL: @sub_ashr_and_i32_vec_undef( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw <4 x i32> [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr <4 x i32> [[SUB]], <i32 31, i32 31, i32 31, i32 undef> +; CHECK-NEXT: [[AND:%.*]] = and <4 x i32> [[SHR]], [[V]] +; CHECK-NEXT: ret <4 x i32> [[AND]] +; + %sub = sub nsw <4 x i32> %x, %v + %shr = ashr <4 x i32> %sub, <i32 31, i32 31, i32 31, i32 undef> + %and = and <4 x i32> %shr, %v + ret <4 x i32> %and +} + +define i32 @sub_ashr_and_i32_shift_wrong_bit(i32 %v, i32 %x) { +; CHECK-LABEL: @sub_ashr_and_i32_shift_wrong_bit( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 15 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHR]], [[V]] +; CHECK-NEXT: ret i32 [[AND]] +; + %sub = sub nsw i32 %x, %v + %shr = ashr i32 %sub, 15 + %and = and i32 %shr, %v + ret i32 %and +} diff --git a/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll b/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll @@ -0,0 +1,250 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -instcombine %s -S -o - | FileCheck %s + +; Clamp positive to allOnesValue: +; E.g., clamp255 implemented in a shifty way, could be optimized as v > 255 ? 255 : v, where sub hasNoSignedWrap. 
+; int32 clamp255(int32 v) { +; return (((255 - (v)) >> 31) | (v)) & 255; +; } +; + +; Scalar Types + +define i32 @clamp255_i32(i32 %v) { +; CHECK-LABEL: @clamp255_i32( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 255, [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 31 +; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[V]] +; CHECK-NEXT: [[AND:%.*]] = and i32 [[OR]], 255 +; CHECK-NEXT: ret i32 [[AND]] +; + %sub = sub nsw i32 255, %v + %shr = ashr i32 %sub, 31 + %or = or i32 %shr, %v + %and = and i32 %or, 255 + ret i32 %and +} + +define i8 @sub_ashr_or_i8(i8 %v, i8 %x) { +; CHECK-LABEL: @sub_ashr_or_i8( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i8 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i8 [[SUB]], 7 +; CHECK-NEXT: [[OR:%.*]] = or i8 [[SHR]], [[V]] +; CHECK-NEXT: ret i8 [[OR]] +; + %sub = sub nsw i8 %x, %v + %shr = ashr i8 %sub, 7 + %or = or i8 %shr, %v + ret i8 %or +} + +define i16 @sub_ashr_or_i16(i16 %v, i16 %x) { +; CHECK-LABEL: @sub_ashr_or_i16( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i16 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i16 [[SUB]], 15 +; CHECK-NEXT: [[OR:%.*]] = or i16 [[SHR]], [[V]] +; CHECK-NEXT: ret i16 [[OR]] +; + %sub = sub nsw i16 %x, %v + %shr = ashr i16 %sub, 15 + %or = or i16 %shr, %v + ret i16 %or +} + +define i32 @sub_ashr_or_i32(i32 %v, i32 %x) { +; CHECK-LABEL: @sub_ashr_or_i32( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 31 +; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[V]] +; CHECK-NEXT: ret i32 [[OR]] +; + %sub = sub nsw i32 %x, %v + %shr = ashr i32 %sub, 31 + %or = or i32 %shr, %v + ret i32 %or +} + +define i64 @sub_ashr_or_i64(i64 %v, i64 %x) { +; CHECK-LABEL: @sub_ashr_or_i64( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i64 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i64 [[SUB]], 63 +; CHECK-NEXT: [[OR:%.*]] = or i64 [[SHR]], [[V]] +; CHECK-NEXT: ret i64 [[OR]] +; + %sub = sub nsw i64 %x, %v + %shr = ashr i64 %sub, 63 + %or = or i64 %shr, %v + 
ret i64 %or +} + +; nuw nsw + +define i32 @sub_ashr_or_i32_nuw_nsw(i32 %v, i32 %x) { +; CHECK-LABEL: @sub_ashr_or_i32_nuw_nsw( +; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 31 +; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[V]] +; CHECK-NEXT: ret i32 [[OR]] +; + %sub = sub nuw nsw i32 %x, %v + %shr = ashr i32 %sub, 31 + %or = or i32 %shr, %v + ret i32 %or +} + +; Commute + +define i32 @sub_ashr_or_i32_commute(i32 %v, i32 %x) { +; CHECK-LABEL: @sub_ashr_or_i32_commute( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 31 +; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[V]] +; CHECK-NEXT: ret i32 [[OR]] +; + %sub = sub nsw i32 %x, %v + %shr = ashr i32 %sub, 31 + %or = or i32 %v, %shr ; commute %shr and %v + ret i32 %or +} + +; Vector Types + +define <4 x i32> @sub_ashr_or_i32_vec(<4 x i32> %v, <4 x i32> %x) { +; CHECK-LABEL: @sub_ashr_or_i32_vec( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw <4 x i32> [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr <4 x i32> [[SUB]], <i32 31, i32 31, i32 31, i32 31> +; CHECK-NEXT: [[OR:%.*]] = or <4 x i32> [[SHR]], [[V]] +; CHECK-NEXT: ret <4 x i32> [[OR]] +; + %sub = sub nsw <4 x i32> %x, %v + %shr = ashr <4 x i32> %sub, <i32 31, i32 31, i32 31, i32 31> + %or = or <4 x i32> %shr, %v + ret <4 x i32> %or +} + +define <4 x i32> @sub_ashr_or_i32_vec_nuw_nsw(<4 x i32> %v, <4 x i32> %x) { +; CHECK-LABEL: @sub_ashr_or_i32_vec_nuw_nsw( +; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw <4 x i32> [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr <4 x i32> [[SUB]], <i32 31, i32 31, i32 31, i32 31> +; CHECK-NEXT: [[OR:%.*]] = or <4 x i32> [[SHR]], [[V]] +; CHECK-NEXT: ret <4 x i32> [[OR]] +; + %sub = sub nuw nsw <4 x i32> %x, %v + %shr = ashr <4 x i32> %sub, <i32 31, i32 31, i32 31, i32 31> + %or = or <4 x i32> %shr, %v + ret <4 x i32> %or +} + +define <4 x i32> @sub_ashr_or_i32_vec_commute(<4 x i32> %v, <4 x i32> %x) { +; CHECK-LABEL: @sub_ashr_or_i32_vec_commute( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw <4 x i32> [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = 
ashr <4 x i32> [[SUB]], <i32 31, i32 31, i32 31, i32 31> +; CHECK-NEXT: [[OR:%.*]] = or <4 x i32> [[SHR]], [[V]] +; CHECK-NEXT: ret <4 x i32> [[OR]] +; + %sub = sub nsw <4 x i32> %x, %v + %shr = ashr <4 x i32> %sub, <i32 31, i32 31, i32 31, i32 31> + %or = or <4 x i32> %v, %shr + ret <4 x i32> %or +} + +; Extra uses + +define i32 @sub_ashr_or_i32_extra_use_sub(i32 %v, i32 %x, i32* %p) { +; CHECK-LABEL: @sub_ashr_or_i32_extra_use_sub( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: store i32 [[SUB]], i32* [[P:%.*]], align 4 +; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 31 +; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[V]] +; CHECK-NEXT: ret i32 [[OR]] +; + %sub = sub nsw i32 %x, %v + store i32 %sub, i32* %p + %shr = ashr i32 %sub, 31 + %or = or i32 %shr, %v + ret i32 %or +} + +define i32 @sub_ashr_or_i32_extra_use_or(i32 %v, i32 %x, i32* %p) { +; CHECK-LABEL: @sub_ashr_or_i32_extra_use_or( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 31 +; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[V]] +; CHECK-NEXT: store i32 [[OR]], i32* [[P:%.*]], align 4 +; CHECK-NEXT: ret i32 [[OR]] +; + %sub = sub nsw i32 %x, %v + %shr = ashr i32 %sub, 31 + %or = or i32 %shr, %v + store i32 %or, i32* %p + ret i32 %or +} + +; Negative Tests + +define i32 @sub_ashr_or_i32_extra_use_ashr(i32 %v, i32 %x, i32* %p) { +; CHECK-LABEL: @sub_ashr_or_i32_extra_use_ashr( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 31 +; CHECK-NEXT: store i32 [[SHR]], i32* [[P:%.*]], align 4 +; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[V]] +; CHECK-NEXT: ret i32 [[OR]] +; + %sub = sub nsw i32 %x, %v + %shr = ashr i32 %sub, 31 + store i32 %shr, i32* %p + %or = or i32 %shr, %v + ret i32 %or +} + +define i32 @sub_ashr_or_i32_no_nsw_nuw(i32 %v, i32 %x) { +; CHECK-LABEL: @sub_ashr_or_i32_no_nsw_nuw( +; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 31 +; CHECK-NEXT: [[OR:%.*]] = or 
i32 [[SHR]], [[V]] +; CHECK-NEXT: ret i32 [[OR]] +; + %sub = sub i32 %x, %v + %shr = ashr i32 %sub, 31 + %or = or i32 %shr, %v + ret i32 %or +} + +define <4 x i32> @sub_ashr_or_i32_vec_undef1(<4 x i32> %v) { +; CHECK-LABEL: @sub_ashr_or_i32_vec_undef1( +; CHECK-NEXT: [[SUB:%.*]] = sub <4 x i32> <i32 255, i32 255, i32 undef, i32 255>, [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr <4 x i32> [[SUB]], <i32 31, i32 31, i32 31, i32 31> +; CHECK-NEXT: [[OR:%.*]] = or <4 x i32> [[SHR]], [[V]] +; CHECK-NEXT: ret <4 x i32> [[OR]] +; + %sub = sub <4 x i32> <i32 255, i32 255, i32 undef, i32 255>, %v + %shr = ashr <4 x i32> %sub, <i32 31, i32 31, i32 31, i32 31> + %or = or <4 x i32> %shr, %v + ret <4 x i32> %or +} + +define <4 x i32> @sub_ashr_or_i32_vec_undef2(<4 x i32> %v) { +; CHECK-LABEL: @sub_ashr_or_i32_vec_undef2( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw <4 x i32> <i32 255, i32 255, i32 255, i32 255>, [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr <4 x i32> [[SUB]], <i32 undef, i32 31, i32 31, i32 31> +; CHECK-NEXT: [[OR:%.*]] = or <4 x i32> [[SHR]], [[V]] +; CHECK-NEXT: ret <4 x i32> [[OR]] +; + %sub = sub nsw <4 x i32> <i32 255, i32 255, i32 255, i32 255>, %v + %shr = ashr <4 x i32> %sub, <i32 undef, i32 31, i32 31, i32 31> + %or = or <4 x i32> %shr, %v + ret <4 x i32> %or +} + +define i32 @sub_ashr_or_i32_shift_wrong_bit(i32 %v, i32 %x) { +; CHECK-LABEL: @sub_ashr_or_i32_shift_wrong_bit( +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X:%.*]], [[V:%.*]] +; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[SUB]], 11 +; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[V]] +; CHECK-NEXT: ret i32 [[OR]] +; + %sub = sub nsw i32 %x, %v + %shr = ashr i32 %sub, 11 + %or = or i32 %shr, %v + ret i32 %or +}