diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -75,11 +75,12 @@
 //   c)  (x & (-1 >> MaskShAmt)) << ShiftShAmt
 //   d)  (x & ((-1 << MaskShAmt) >> MaskShAmt)) << ShiftShAmt
 //   e)  ((x << MaskShAmt) l>> MaskShAmt) << ShiftShAmt
+//   f)  ((x << MaskShAmt) a>> MaskShAmt) << ShiftShAmt
 // All these patterns can be simplified to just:
 //   x << ShiftShAmt
 // iff:
-//   a,b)   (MaskShAmt+ShiftShAmt) u>= bitwidth(x)
-//   c,d,e) (ShiftShAmt-MaskShAmt) s>= 0 (i.e. ShiftShAmt u>= MaskShAmt)
+//   a,b)     (MaskShAmt+ShiftShAmt) u>= bitwidth(x)
+//   c,d,e,f) (ShiftShAmt-MaskShAmt) s>= 0 (i.e. ShiftShAmt u>= MaskShAmt)
 static Instruction *
 dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift,
                                      const SimplifyQuery &SQ) {
@@ -115,8 +116,8 @@
       return nullptr;
     // All good, we can do this fold.
   } else if (match(Masked, m_c_And(m_CombineOr(MaskC, MaskD), m_Value(X))) ||
-             match(Masked, m_LShr(m_Shl(m_Value(X), m_Value(MaskShAmt)),
-                                  m_Deferred(MaskShAmt)))) {
+             match(Masked, m_Shr(m_Shl(m_Value(X), m_Value(MaskShAmt)),
+                                 m_Deferred(MaskShAmt)))) {
     // Can we simplify (ShiftShAmt-MaskShAmt) ?
     Value *ShAmtsDiff = SimplifyBinOp(Instruction::BinaryOps::Sub, ShiftShAmt,
                                       MaskShAmt,
diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-f.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-f.ll
--- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-f.ll
+++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-f.ll
@@ -21,7 +21,7 @@
 ; CHECK-NEXT:    [[T1:%.*]] = ashr i32 [[T0]], [[NBITS]]
 ; CHECK-NEXT:    call void @use32(i32 [[T0]])
 ; CHECK-NEXT:    call void @use32(i32 [[T1]])
-; CHECK-NEXT:    [[T2:%.*]] = shl i32 [[T1]], [[NBITS]]
+; CHECK-NEXT:    [[T2:%.*]] = shl i32 [[X]], [[NBITS]]
 ; CHECK-NEXT:    ret i32 [[T2]]
 ;
   %t0 = shl i32 %x, %nbits
@@ -40,7 +40,7 @@
 ; CHECK-NEXT:    call void @use32(i32 [[T0]])
 ; CHECK-NEXT:    call void @use32(i32 [[T1]])
 ; CHECK-NEXT:    call void @use32(i32 [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[T1]], [[T2]]
+; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[X]], [[T2]]
 ; CHECK-NEXT:    ret i32 [[T3]]
 ;
   %t0 = shl i32 %x, %nbits
@@ -65,7 +65,7 @@
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T0]])
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T1]])
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = shl <3 x i32> [[T1]], [[T2]]
+; CHECK-NEXT:    [[T3:%.*]] = shl <3 x i32> [[X]], [[T2]]
 ; CHECK-NEXT:    ret <3 x i32> [[T3]]
 ;
   %t0 = shl <3 x i32> %x, %nbits
@@ -86,7 +86,7 @@
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T0]])
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T1]])
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = shl <3 x i32> [[T1]], [[T2]]
+; CHECK-NEXT:    [[T3:%.*]] = shl <3 x i32> [[X]], [[T2]]
 ; CHECK-NEXT:    ret <3 x i32> [[T3]]
 ;
   %t0 = shl <3 x i32> %x, %nbits
@@ -107,7 +107,7 @@
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T0]])
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T1]])
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = shl <3 x i32> [[T1]], [[T2]]
+; CHECK-NEXT:    [[T3:%.*]] = shl <3 x i32> [[X]], [[T2]]
 ; CHECK-NEXT:    ret <3 x i32> [[T3]]
 ;
   %t0 = shl <3 x i32> %x, %nbits
@@ -128,7 +128,7 @@
 ; CHECK-NEXT:    [[T1:%.*]] = ashr i32 [[T0]], [[NBITS]]
 ; CHECK-NEXT:    call void @use32(i32 [[T0]])
 ; CHECK-NEXT:    call void @use32(i32 [[T1]])
-; CHECK-NEXT:    [[T2:%.*]] = shl nuw i32 [[T1]], [[NBITS]]
+; CHECK-NEXT:    [[T2:%.*]] = shl i32 [[X]], [[NBITS]]
 ; CHECK-NEXT:    ret i32 [[T2]]
 ;
   %t0 = shl i32 %x, %nbits
@@ -145,7 +145,7 @@
 ; CHECK-NEXT:    [[T1:%.*]] = ashr i32 [[T0]], [[NBITS]]
 ; CHECK-NEXT:    call void @use32(i32 [[T0]])
 ; CHECK-NEXT:    call void @use32(i32 [[T1]])
-; CHECK-NEXT:    [[T2:%.*]] = shl nsw i32 [[T1]], [[NBITS]]
+; CHECK-NEXT:    [[T2:%.*]] = shl i32 [[X]], [[NBITS]]
 ; CHECK-NEXT:    ret i32 [[T2]]
 ;
   %t0 = shl i32 %x, %nbits
@@ -162,7 +162,7 @@
 ; CHECK-NEXT:    [[T1:%.*]] = ashr i32 [[T0]], [[NBITS]]
 ; CHECK-NEXT:    call void @use32(i32 [[T0]])
 ; CHECK-NEXT:    call void @use32(i32 [[T1]])
-; CHECK-NEXT:    [[T2:%.*]] = shl nuw nsw i32 [[T1]], [[NBITS]]
+; CHECK-NEXT:    [[T2:%.*]] = shl i32 [[X]], [[NBITS]]
 ; CHECK-NEXT:    ret i32 [[T2]]
 ;
   %t0 = shl i32 %x, %nbits
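
Not part of the patch; for illustration only, a minimal sketch of the new variant-f fold in isolation, mirroring the shape of the first test above (the function name is made up):

  define i32 @variant_f_example(i32 %x, i32 %nbits) {
    %t0 = shl i32 %x, %nbits    ; x << MaskShAmt
    %t1 = ashr i32 %t0, %nbits  ; (x << MaskShAmt) a>> MaskShAmt
    %t2 = shl i32 %t1, %nbits   ; masked value << ShiftShAmt
    ret i32 %t2
  }

With this change, the shl+ashr pair is matched via m_Shr (which covers both lshr and ashr), and since ShiftShAmt u>= MaskShAmt holds here (the amounts are equal), the final shift is rewritten to use %x directly, as the updated CHECK lines show:

  %t2 = shl i32 %x, %nbits

The sign bits duplicated by the ashr are shifted back out by the outer shl, which is why the masking is redundant under the c,d,e,f condition.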