diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -71,10 +71,11 @@
 //
 // There are many variants to this pattern:
 //   a) (x & ((1 << MaskShAmt) - 1)) << ShiftShAmt
+//   b) (x & (~(-1 << MaskShAmt))) << ShiftShAmt
 // All these patterns can be simplified to just:
 //   x << ShiftShAmt
 // iff:
-//   a) (MaskShAmt+ShiftShAmt) u>= bitwidth(x)
+//   a,b) (MaskShAmt+ShiftShAmt) u>= bitwidth(x)
 static Instruction *
 dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift,
                                      const SimplifyQuery &SQ) {
@@ -86,9 +87,11 @@
 
   // ((1 << MaskShAmt) - 1)
   auto MaskA = m_Add(m_Shl(m_One(), m_Value(MaskShAmt)), m_AllOnes());
+  // (~(-1 << MaskShAmt))
+  auto MaskB = m_Xor(m_Shl(m_AllOnes(), m_Value(MaskShAmt)), m_AllOnes());
 
   Value *X;
-  if (!match(Masked, m_c_And(MaskA, m_Value(X))))
+  if (!match(Masked, m_c_And(m_CombineOr(MaskA, MaskB), m_Value(X))))
     return nullptr;
 
   // Can we simplify (MaskShAmt+ShiftShAmt) ?
diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll
--- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll
+++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll
@@ -25,7 +25,7 @@
 ; CHECK-NEXT:    call void @use32(i32 [[T1]])
 ; CHECK-NEXT:    call void @use32(i32 [[T2]])
 ; CHECK-NEXT:    call void @use32(i32 [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = shl i32 [[T2]], [[T3]]
+; CHECK-NEXT:    [[T4:%.*]] = shl i32 [[X]], [[T3]]
 ; CHECK-NEXT:    ret i32 [[T4]]
 ;
   %t0 = shl i32 -1, %nbits
@@ -50,7 +50,7 @@
 ; CHECK-NEXT:    call void @use32(i32 [[T1]])
 ; CHECK-NEXT:    call void @use32(i32 [[T2]])
 ; CHECK-NEXT:    call void @use32(i32 [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = shl i32 [[T2]], [[T3]]
+; CHECK-NEXT:    [[T4:%.*]] = shl i32 [[X]], [[T3]]
 ; CHECK-NEXT:    ret i32 [[T4]]
 ;
   %t0 = shl i32 -1, %nbits
@@ -77,7 +77,7 @@
 ; CHECK-NEXT:    call void @use32(i32 [[T2]])
 ; CHECK-NEXT:    call void @use32(i32 [[T3]])
 ; CHECK-NEXT:    call void @use32(i32 [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T3]], [[T4]]
+; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[X]], [[T4]]
 ; CHECK-NEXT:    ret i32 [[T5]]
 ;
   %t0 = add i32 %nbits, 1
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T2]])
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T3]])
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = shl <3 x i32> [[T3]], [[T4]]
+; CHECK-NEXT:    [[T5:%.*]] = shl <3 x i32> [[X]], [[T4]]
 ; CHECK-NEXT:    ret <3 x i32> [[T5]]
 ;
   %t0 = add <3 x i32> %nbits,
@@ -138,7 +138,7 @@
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T2]])
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T3]])
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = shl <3 x i32> [[T3]], [[T4]]
+; CHECK-NEXT:    [[T5:%.*]] = shl <3 x i32> [[X]], [[T4]]
 ; CHECK-NEXT:    ret <3 x i32> [[T5]]
 ;
   %t0 = add <3 x i32> %nbits,
@@ -166,7 +166,7 @@
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T2]])
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T3]])
 ; CHECK-NEXT:    call void @use3xi32(<3 x i32> [[T4]])
-; CHECK-NEXT:    [[T5:%.*]] = shl <3 x i32> [[T3]], [[T4]]
+; CHECK-NEXT:    [[T5:%.*]] = shl <3 x i32> [[X]], [[T4]]
 ; CHECK-NEXT:    ret <3 x i32> [[T5]]
 ;
   %t0 = add <3 x i32> %nbits,
@@ -198,7 +198,7 @@
 ; CHECK-NEXT:    call void @use32(i32 [[T1]])
 ; CHECK-NEXT:    call void @use32(i32 [[T2]])
 ; CHECK-NEXT:    call void @use32(i32 [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = shl i32 [[T2]], [[T3]]
+; CHECK-NEXT:    [[T4:%.*]] = shl i32 [[X]], [[T3]]
 ; CHECK-NEXT:    ret i32 [[T4]]
 ;
   %x = call i32 @gen32()
@@ -260,7 +260,7 @@
 ; CHECK-NEXT:    call void @use32(i32 [[T3]])
 ; CHECK-NEXT:    call void @use32(i32 [[T4]])
 ; CHECK-NEXT:    call void @use32(i32 [[T5]])
-; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T4]], [[T5]]
+; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T1]], [[T5]]
 ; CHECK-NEXT:    ret i32 [[T6]]
 ;
   %t0 = shl i32 -1, %nbits0
@@ -291,7 +291,7 @@
 ; CHECK-NEXT:    call void @use32(i32 [[T1]])
 ; CHECK-NEXT:    call void @use32(i32 [[T2]])
 ; CHECK-NEXT:    call void @use32(i32 [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = shl nuw i32 [[T2]], [[T3]]
+; CHECK-NEXT:    [[T4:%.*]] = shl i32 [[X]], [[T3]]
 ; CHECK-NEXT:    ret i32 [[T4]]
 ;
   %t0 = shl i32 -1, %nbits
@@ -316,7 +316,7 @@
 ; CHECK-NEXT:    call void @use32(i32 [[T1]])
 ; CHECK-NEXT:    call void @use32(i32 [[T2]])
 ; CHECK-NEXT:    call void @use32(i32 [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = shl nsw i32 [[T2]], [[T3]]
+; CHECK-NEXT:    [[T4:%.*]] = shl i32 [[X]], [[T3]]
 ; CHECK-NEXT:    ret i32 [[T4]]
 ;
   %t0 = shl i32 -1, %nbits
@@ -341,7 +341,7 @@
 ; CHECK-NEXT:    call void @use32(i32 [[T1]])
 ; CHECK-NEXT:    call void @use32(i32 [[T2]])
 ; CHECK-NEXT:    call void @use32(i32 [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = shl nuw nsw i32 [[T2]], [[T3]]
+; CHECK-NEXT:    [[T4:%.*]] = shl i32 [[X]], [[T3]]
 ; CHECK-NEXT:    ret i32 [[T4]]
 ;
   %t0 = shl i32 -1, %nbits
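For illustration, a minimal standalone sketch of the new pattern-b fold (the
function and value names below are hypothetical, not taken from the test
suite). With i32, `~(-1 << %nbits)` keeps only the low %nbits bits of %x, and
shifting by `32 - %nbits` satisfies the `(MaskShAmt+ShiftShAmt) u>= bitwidth(x)`
condition, so the mask is redundant:

  define i32 @variant_b_example(i32 %x, i32 %nbits) {
    %setbits = shl i32 -1, %nbits   ; -1 << %nbits
    %mask = xor i32 %setbits, -1    ; ~(-1 << %nbits): low %nbits bits set
    %masked = and i32 %mask, %x     ; x & ~(-1 << %nbits)
    %shamt = sub i32 32, %nbits     ; %nbits + %shamt u>= 32 = bitwidth(x)
    %r = shl i32 %masked, %shamt    ; masked-off bits are all shifted out
    ret i32 %r
  }

Running `opt -instcombine` over this should now rewrite the final shift to
`shl i32 %x, %shamt` (and the then-dead mask computation away): InstSimplify
folds `%nbits + (32 - %nbits)` to the constant 32, which satisfies the
`u>= bitwidth(x)` check, exactly as in the t0-style tests above.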