Index: llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp
===================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -600,11 +600,11 @@
   if (Instruction *V = commonShiftTransforms(I))
     return V;
 
+  Type *Ty = I.getType();
   const APInt *ShAmtAPInt;
   if (match(Op1, m_APInt(ShAmtAPInt))) {
     unsigned ShAmt = ShAmtAPInt->getZExtValue();
     unsigned BitWidth = I.getType()->getScalarSizeInBits();
-    Type *Ty = I.getType();
 
     // shl (zext X), ShAmt --> zext (shl X, ShAmt)
     // This is only valid if X would have zeros shifted out.
@@ -670,6 +670,15 @@
     }
   }
 
+  // Transform (x >> y) << y to x & (-1 << y)
+  // Valid for any type of right-shift.
+  Value *X;
+  if (match(Op0, m_OneUse(m_Shr(m_Value(X), m_Specific(Op1))))) {
+    Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
+    Value *Mask = Builder.CreateShl(AllOnes, Op1);
+    return BinaryOperator::CreateAnd(Mask, X);
+  }
+
   Constant *C1;
   if (match(Op1, m_Constant(C1))) {
     Constant *C2;
Index: llvm/trunk/test/Transforms/InstCombine/canonicalize-ashr-shl-to-masking.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/canonicalize-ashr-shl-to-masking.ll
+++ llvm/trunk/test/Transforms/InstCombine/canonicalize-ashr-shl-to-masking.ll
@@ -15,8 +15,8 @@
 
 define i32 @positive_samevar(i32 %x, i32 %y) {
 ; CHECK-LABEL: @positive_samevar(
-; CHECK-NEXT: [[TMP0:%.*]] = ashr i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = shl i32 [[TMP0]], [[Y]]
+; CHECK-NEXT: [[TMP1:%.*]] = shl i32 -1, [[Y:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = and i32 [[TMP1]], [[X:%.*]]
 ; CHECK-NEXT: ret i32 [[RET]]
 ;
   %tmp0 = ashr i32 %x, %y
@@ -114,8 +114,8 @@
 
 define <2 x i32> @positive_samevar_vec(<2 x i32> %x, <2 x i32> %y) {
 ; CHECK-LABEL: @positive_samevar_vec(
-; CHECK-NEXT: [[TMP0:%.*]] = ashr <2 x i32> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = shl <2 x i32> [[TMP0]], [[Y]]
+; CHECK-NEXT: [[TMP1:%.*]] = shl <2 x i32> <i32 -1, i32 -1>, [[Y:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = and <2 x i32> [[TMP1]], [[X:%.*]]
 ; CHECK-NEXT: ret <2 x i32> [[RET]]
 ;
   %tmp0 = ashr <2 x i32> %x, %y
@@ -161,8 +161,7 @@
 
 define <3 x i32> @positive_sameconst_vec_undef2(<3 x i32> %x) {
 ; CHECK-LABEL: @positive_sameconst_vec_undef2(
-; CHECK-NEXT: [[TMP0:%.*]] = ashr <3 x i32> [[X:%.*]],
-; CHECK-NEXT: [[RET:%.*]] = shl <3 x i32> [[TMP0]],
+; CHECK-NEXT: [[RET:%.*]] = and <3 x i32> [[X:%.*]],
 ; CHECK-NEXT: ret <3 x i32> [[RET]]
 ;
   %tmp0 = ashr <3 x i32> %x,
Index: llvm/trunk/test/Transforms/InstCombine/canonicalize-lshr-shl-to-masking.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/canonicalize-lshr-shl-to-masking.ll
+++ llvm/trunk/test/Transforms/InstCombine/canonicalize-lshr-shl-to-masking.ll
@@ -15,8 +15,8 @@
 
 define i32 @positive_samevar(i32 %x, i32 %y) {
 ; CHECK-LABEL: @positive_samevar(
-; CHECK-NEXT: [[TMP0:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = shl i32 [[TMP0]], [[Y]]
+; CHECK-NEXT: [[TMP1:%.*]] = shl i32 -1, [[Y:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = and i32 [[TMP1]], [[X:%.*]]
 ; CHECK-NEXT: ret i32 [[RET]]
 ;
   %tmp0 = lshr i32 %x, %y
@@ -114,8 +114,8 @@
 
 define <2 x i32> @positive_samevar_vec(<2 x i32> %x, <2 x i32> %y) {
 ; CHECK-LABEL: @positive_samevar_vec(
-; CHECK-NEXT: [[TMP0:%.*]] = lshr <2 x i32> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = shl <2 x i32> [[TMP0]], [[Y]]
+; CHECK-NEXT: [[TMP1:%.*]] = shl <2 x i32> <i32 -1, i32 -1>, [[Y:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = and <2 x i32> [[TMP1]], [[X:%.*]]
 ; CHECK-NEXT: ret <2 x i32> [[RET]]
 ;
   %tmp0 = lshr <2 x i32> %x, %y
@@ -161,8 +161,7 @@
 
 define <3 x i32> @positive_sameconst_vec_undef2(<3 x i32> %x) {
 ; CHECK-LABEL: @positive_sameconst_vec_undef2(
-; CHECK-NEXT: [[TMP0:%.*]] = lshr <3 x i32> [[X:%.*]],
-; CHECK-NEXT: [[RET:%.*]] = shl <3 x i32> [[TMP0]],
+; CHECK-NEXT: [[RET:%.*]] = and <3 x i32> [[X:%.*]],
 ; CHECK-NEXT: ret <3 x i32> [[RET]]
 ;
   %tmp0 = lshr <3 x i32> %x,
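
Note: as a quick illustration of the new fold (this example is not part of the committed tests; the function and value names are made up), running opt -instcombine -S over

  define i8 @example(i8 %x, i8 %y) {
    ; ashr feeding a shl by the same variable amount, with a single use
    %s = ashr i8 %x, %y
    %r = shl i8 %s, %y
    ret i8 %r
  }

should now produce the masking form, roughly:

  define i8 @example(i8 %x, i8 %y) {
    ; -1 << y builds the mask of bits that survive the shift round-trip
    %mask = shl i8 -1, %y
    %r = and i8 %mask, %x
    ret i8 %r
  }

The same rewrite applies when the right-shift is an lshr, and it only fires when the right-shift has no other uses (the m_OneUse check above).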