Index: llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
===================================================================
--- llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -153,8 +153,10 @@
   if (Value *V = SimplifyUsingDistributiveLaws(I))
     return replaceInstUsesWith(I, V);
 
-  // X * -1 == 0 - X
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
+  unsigned BitWidth = I.getType()->getScalarSizeInBits();
+
+  // X * -1 == 0 - X
   if (match(Op1, m_AllOnes())) {
     BinaryOperator *BO = BinaryOperator::CreateNeg(Op0, I.getName());
     if (I.hasNoSignedWrap())
@@ -360,6 +362,19 @@
   if (match(Op1, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
     return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op0);
 
+  // ((ashr X, 31) | 1) * X --> abs(X)
+  // X * ((ashr X, 31) | 1) --> abs(X)
+  if (match(&I, m_c_BinOp(m_Or(m_AShr(m_Value(X),
+                                      m_SpecificIntAllowUndef(BitWidth - 1)),
+                               m_One()),
+                          m_Deferred(X)))) {
+    Value *Abs = Builder.CreateBinaryIntrinsic(
+        Intrinsic::abs, X,
+        ConstantInt::getBool(I.getContext(), I.hasNoSignedWrap()));
+    Abs->takeName(&I);
+    return replaceInstUsesWith(I, Abs);
+  }
+
   if (Instruction *Ext = narrowMathIfNoOverflow(I))
     return Ext;
 
Index: llvm/test/Transforms/InstCombine/ashr-or-mul-abs.ll
===================================================================
--- llvm/test/Transforms/InstCombine/ashr-or-mul-abs.ll
+++ llvm/test/Transforms/InstCombine/ashr-or-mul-abs.ll
@@ -6,9 +6,7 @@
 
 define i32 @ashr_or_mul_to_abs(i32 %X) {
 ; CHECK-LABEL: @ashr_or_mul_to_abs(
-; CHECK-NEXT:    [[I:%.*]] = ashr i32 [[X:%.*]], 31
-; CHECK-NEXT:    [[I1:%.*]] = or i32 [[I]], 1
-; CHECK-NEXT:    [[I2:%.*]] = mul nsw i32 [[I1]], [[X]]
+; CHECK-NEXT:    [[I2:%.*]] = call i32 @llvm.abs.i32(i32 [[X:%.*]], i1 true)
 ; CHECK-NEXT:    ret i32 [[I2]]
 ;
   %i = ashr i32 %X, 31
@@ -19,9 +17,7 @@
 
 define i32 @ashr_or_mul_to_abs2(i32 %X) {
 ; CHECK-LABEL: @ashr_or_mul_to_abs2(
-; CHECK-NEXT:    [[I:%.*]] = ashr i32 [[X:%.*]], 31
-; CHECK-NEXT:    [[I1:%.*]] = or i32 [[I]], 1
-; CHECK-NEXT:    [[I2:%.*]] = mul i32 [[I1]], [[X]]
+; CHECK-NEXT:    [[I2:%.*]] = call i32 @llvm.abs.i32(i32 [[X:%.*]], i1 false)
 ; CHECK-NEXT:    ret i32 [[I2]]
 ;
   %i = ashr i32 %X, 31
@@ -32,9 +28,7 @@
 
 define i32 @ashr_or_mul_to_abs3(i32 %X) {
 ; CHECK-LABEL: @ashr_or_mul_to_abs3(
-; CHECK-NEXT:    [[I:%.*]] = ashr i32 [[X:%.*]], 31
-; CHECK-NEXT:    [[I1:%.*]] = or i32 [[I]], 1
-; CHECK-NEXT:    [[I2:%.*]] = mul i32 [[I1]], [[X]]
+; CHECK-NEXT:    [[I2:%.*]] = call i32 @llvm.abs.i32(i32 [[X:%.*]], i1 false)
 ; CHECK-NEXT:    ret i32 [[I2]]
 ;
   %i = ashr i32 %X, 31
@@ -46,9 +40,7 @@
 
 define <4 x i32> @ashr_or_mul_to_abs_vec(<4 x i32> %X) {
 ; CHECK-LABEL: @ashr_or_mul_to_abs_vec(
-; CHECK-NEXT:    [[I:%.*]] = ashr <4 x i32> [[X:%.*]],
-; CHECK-NEXT:    [[I1:%.*]] = or <4 x i32> [[I]],
-; CHECK-NEXT:    [[I2:%.*]] = mul <4 x i32> [[I1]], [[X]]
+; CHECK-NEXT:    [[I2:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[X:%.*]], i1 false)
 ; CHECK-NEXT:    ret <4 x i32> [[I2]]
 ;
   %i = ashr <4 x i32> %X,
@@ -59,9 +51,7 @@
 
 define <4 x i32> @ashr_or_mul_to_abs_vec2(<4 x i32> %X) {
 ; CHECK-LABEL: @ashr_or_mul_to_abs_vec2(
-; CHECK-NEXT:    [[I:%.*]] = ashr <4 x i32> [[X:%.*]],
-; CHECK-NEXT:    [[I1:%.*]] = or <4 x i32> [[I]],
-; CHECK-NEXT:    [[I2:%.*]] = mul nsw <4 x i32> [[I1]], [[X]]
+; CHECK-NEXT:    [[I2:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[X:%.*]], i1 true)
 ; CHECK-NEXT:    ret <4 x i32> [[I2]]
 ;
   %i = ashr <4 x i32> %X,
@@ -72,9 +62,7 @@
 
 define <4 x i32> @ashr_or_mul_to_abs_vec3_undef(<4 x i32> %X) {
 ; CHECK-LABEL: @ashr_or_mul_to_abs_vec3_undef(
-; CHECK-NEXT:    [[I:%.*]] = ashr <4 x i32> [[X:%.*]],
-; CHECK-NEXT:    [[I1:%.*]] = or <4 x i32> [[I]],
-; CHECK-NEXT:    [[I2:%.*]] = mul <4 x i32> [[I1]], [[X]]
+; CHECK-NEXT:    [[I2:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[X:%.*]], i1 false)
 ; CHECK-NEXT:    ret <4 x i32> [[I2]]
 ;
   %i = ashr <4 x i32> %X,