Index: llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp
===================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -202,10 +202,35 @@
     // shall be unset in the root value (OuterShift). If ShAmtsDiff is negative,
     // we'll need to also produce a mask to unset ShAmtsDiff high bits.
     // So, does *any* channel need a mask? (is ShiftShAmt u>= MaskShAmt ?)
-    if (!match(ShAmtsDiff, m_NonNegative()))
-      return nullptr; // FIXME.
+    if (!match(ShAmtsDiff, m_NonNegative())) {
+      // This sub-fold (with mask) is invalid for the 'ashr' "masking" instruction.
+      if (match(Masked, m_AShr(m_Value(), m_Value())))
+        return nullptr;
+      // For a mask we need to get rid of the old masking instruction.
+      if (!Masked->hasOneUse())
+        return nullptr; // Else we can't perform the fold.
+      Type *Ty = X->getType();
+      unsigned BitWidth = Ty->getScalarSizeInBits();
+      // We should compute the mask in a wider type, and truncate later!
+      // Get a type twice as wide element-wise (same number of elements!).
+      Type *ExtendedScalarTy = Type::getIntNTy(Ty->getContext(), 2 * BitWidth);
+      Type *ExtendedTy =
+          Ty->isVectorTy()
+              ? VectorType::get(ExtendedScalarTy, Ty->getVectorNumElements())
+              : ExtendedScalarTy;
+      auto *ExtendedNumHighBitsToClear = ConstantExpr::getZExt(
+          ConstantExpr::getAdd(
+              ConstantExpr::getNeg(ShAmtsDiff),
+              ConstantInt::get(Ty, BitWidth, /*isSigned=*/false)),
+          ExtendedTy);
+      // And compute the mask as usual: (-1 l>> ExtendedNumHighBitsToClear)
+      auto *ExtendedAllOnes = ConstantExpr::getAllOnesValue(ExtendedTy);
+      auto *ExtendedMask =
+          ConstantExpr::getLShr(ExtendedAllOnes, ExtendedNumHighBitsToClear);
+      NewMask = ConstantExpr::getTrunc(ExtendedMask, Ty);
+    } else
+      NewMask = nullptr; // No mask needed.
     // All good, we can do this fold.
-    NewMask = nullptr; // No mask needed.
   } else
     return nullptr; // Don't know anything about this pattern.
 
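As a quick illustration of the new masked path, here is a sketch distilled from the scalar @t0_basic FileCheck expectations below (it is not part of the patch itself): with MaskShAmt = %nbits and ShiftShAmt = %nbits - 1, ShAmtsDiff is -1, so one high bit must be cleared and the computed mask is 2147483647 (0x7FFFFFFF).

  Before:
    %t0 = lshr i32 -1, %nbits
    %t1 = and i32 %t0, %x
    %t2 = add i32 %nbits, -1
    %t3 = shl i32 %t1, %t2

  After:
    %t0 = lshr i32 -1, %nbits        ; still emitted, the test also passes it to @use32
    %t2 = add i32 %nbits, -1
    %tmp1 = shl i32 %x, %t2
    %t3 = and i32 %tmp1, 2147483647  ; clear the one high bit the old mask discarded
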
Index: llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll
+++ llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll
@@ -16,11 +16,11 @@
 define i32 @t0_basic(i32 %x, i32 %nbits) {
 ; CHECK-LABEL: @t0_basic(
 ; CHECK-NEXT:    [[T0:%.*]] = lshr i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[X:%.*]]
 ; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -1
 ; CHECK-NEXT:    call void @use32(i32 [[T0]])
 ; CHECK-NEXT:    call void @use32(i32 [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[T1]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], [[T2]]
+; CHECK-NEXT:    [[T3:%.*]] = and i32 [[TMP1]], 2147483647
 ; CHECK-NEXT:    ret i32 [[T3]]
 ;
   %t0 = lshr i32 -1, %nbits
@@ -39,11 +39,11 @@
 define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t1_vec_splat(
 ; CHECK-NEXT:    [[T0:%.*]] = lshr <8 x i32> , [[NBITS:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = and <8 x i32> [[T0]], [[X:%.*]]
 ; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]],
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = shl <8 x i32> [[T1]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <8 x i32> [[X:%.*]], [[T2]]
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i32> [[TMP1]],
 ; CHECK-NEXT:    ret <8 x i32> [[T3]]
 ;
   %t0 = lshr <8 x i32> , %nbits
@@ -58,11 +58,11 @@
 define <8 x i32> @t1_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t1_vec_nonsplat(
 ; CHECK-NEXT:    [[T0:%.*]] = lshr <8 x i32> , [[NBITS:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = and <8 x i32> [[T0]], [[X:%.*]]
 ; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]],
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = shl <8 x i32> [[T1]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <8 x i32> [[X:%.*]], [[T2]]
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i32> [[TMP1]],
 ; CHECK-NEXT:    ret <8 x i32> [[T3]]
 ;
   %t0 = lshr <8 x i32> , %nbits
Index: llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll
+++ llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll
@@ -17,12 +17,12 @@
 ; CHECK-LABEL: @t0_basic(
 ; CHECK-NEXT:    [[T0:%.*]] = shl i32 -1, [[NBITS:%.*]]
 ; CHECK-NEXT:    [[T1:%.*]] = lshr i32 [[T0]], [[NBITS]]
-; CHECK-NEXT:    [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
 ; CHECK-NEXT:    [[T3:%.*]] = add i32 [[NBITS]], -1
 ; CHECK-NEXT:    call void @use32(i32 [[T0]])
 ; CHECK-NEXT:    call void @use32(i32 [[T1]])
 ; CHECK-NEXT:    call void @use32(i32 [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = shl i32 [[T2]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], [[T3]]
+; CHECK-NEXT:    [[T4:%.*]] = and i32 [[TMP1]], 2147483647
 ; CHECK-NEXT:    ret i32 [[T4]]
 ;
   %t0 = shl i32 -1, %nbits
@@ -44,12 +44,12 @@
 ; CHECK-LABEL: @t2_vec_splat(
 ; CHECK-NEXT:    [[T0:%.*]] = shl <8 x i32> , [[NBITS:%.*]]
 ; CHECK-NEXT:    [[T1:%.*]] = lshr <8 x i32> [[T0]], [[NBITS]]
-; CHECK-NEXT:    [[T2:%.*]] = and <8 x i32> [[T1]], [[X:%.*]]
 ; CHECK-NEXT:    [[T3:%.*]] = add <8 x i32> [[NBITS]],
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = shl <8 x i32> [[T2]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <8 x i32> [[X:%.*]], [[T3]]
+; CHECK-NEXT:    [[T4:%.*]] = and <8 x i32> [[TMP1]],
 ; CHECK-NEXT:    ret <8 x i32> [[T4]]
 ;
   %t0 = shl <8 x i32> , %nbits
@@ -67,12 +67,12 @@
 ; CHECK-LABEL: @t2_vec_nonsplat(
 ; CHECK-NEXT:    [[T0:%.*]] = shl <8 x i32> , [[NBITS:%.*]]
 ; CHECK-NEXT:    [[T1:%.*]] = lshr <8 x i32> [[T0]], [[NBITS]]
-; CHECK-NEXT:    [[T2:%.*]] = and <8 x i32> [[T1]], [[X:%.*]]
 ; CHECK-NEXT:    [[T3:%.*]] = add <8 x i32> [[NBITS]],
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = shl <8 x i32> [[T2]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <8 x i32> [[X:%.*]], [[T3]]
+; CHECK-NEXT:    [[T4:%.*]] = and <8 x i32> [[TMP1]],
 ; CHECK-NEXT:    ret <8 x i32> [[T4]]
 ;
   %t0 = shl <8 x i32> , %nbits
Index: llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-e.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-e.ll
+++ llvm/trunk/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-e.ll
@@ -16,11 +16,11 @@
 define i32 @t0_basic(i32 %x, i32 %nbits) {
 ; CHECK-LABEL: @t0_basic(
 ; CHECK-NEXT:    [[T0:%.*]] = shl i32 [[X:%.*]], [[NBITS:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = lshr i32 [[T0]], [[NBITS]]
 ; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -1
 ; CHECK-NEXT:    call void @use32(i32 [[T0]])
 ; CHECK-NEXT:    call void @use32(i32 [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[T1]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X]], [[T2]]
+; CHECK-NEXT:    [[T3:%.*]] = and i32 [[TMP1]], 2147483647
 ; CHECK-NEXT:    ret i32 [[T3]]
 ;
   %t0 = shl i32 %x, %nbits
@@ -39,11 +39,11 @@
 define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t1_vec_splat(
 ; CHECK-NEXT:    [[T0:%.*]] = shl <8 x i32> [[X:%.*]], [[NBITS:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = lshr <8 x i32> [[T0]], [[NBITS]]
 ; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]],
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = shl <8 x i32> [[T1]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <8 x i32> [[X]], [[T2]]
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i32> [[TMP1]],
 ; CHECK-NEXT:    ret <8 x i32> [[T3]]
 ;
   %t0 = shl <8 x i32> %x, %nbits
@@ -58,11 +58,11 @@
 define <8 x i32> @t1_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t1_vec_nonsplat(
 ; CHECK-NEXT:    [[T0:%.*]] = shl <8 x i32> [[X:%.*]], [[NBITS:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = lshr <8 x i32> [[T0]], [[NBITS]]
 ; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]],
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = shl <8 x i32> [[T1]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <8 x i32> [[X]], [[T2]]
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i32> [[TMP1]],
 ; CHECK-NEXT:    ret <8 x i32> [[T3]]
 ;
   %t0 = shl <8 x i32> %x, %nbits