Index: llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp =================================================================== --- llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp +++ llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp @@ -1267,6 +1267,50 @@ return NewShl; } +/// Reduce a sequence of masked half-width multiplies to a single multiply. +/// (((XLow * YHigh) + (YLow * XHigh)) << HalfBits) + (XLow * YLow) --> X * Y +static Instruction *foldBoxMultiply(BinaryOperator &I) { + if (!I.getType()->isIntegerTy()) + return nullptr; + + unsigned BitWidth = I.getType()->getScalarSizeInBits(); + // Skip the odd bitwidth types and large bitwidth types + // TODO: Relax the constraint of wide/vector types. + if ((BitWidth & 0x1) || (BitWidth > 128)) + return nullptr; + + unsigned HalfBits = BitWidth >> 1; + APInt HalfMask = APInt::getMaxValue(HalfBits); + + // ResLo = (CrossSum << HalfBits) + (YLo * XLo) + Value *XLo, *YLo; + Value *CrossSum; + if (!match(&I, m_c_Add(m_Shl(m_Value(CrossSum), m_SpecificInt(HalfBits)), + m_Mul(m_Value(YLo), m_Value(XLo))))) + return nullptr; + + // XLo = X & HalfMask + // YLo = Y & HalfMask + // TODO: Refactor with SimplifyDemandedBits or KnownBits known leading zeros + // to enhance robustness + Value *X, *Y; + if (!match(XLo, m_And(m_Value(X), m_SpecificInt(HalfMask))) || + !match(YLo, m_And(m_Value(Y), m_SpecificInt(HalfMask)))) + return nullptr; + + // CrossSum = (X' * (Y >> HalfBits)) + (Y' * (X >> HalfBits)) + // X' can be either X or XLo in the pattern (and the same for Y') + if (match(CrossSum, + m_c_Add(m_c_Mul(m_LShr(m_Specific(Y), m_SpecificInt(HalfBits)), + m_CombineOr(m_Specific(X), m_Specific(XLo))), + + m_c_Mul(m_LShr(m_Specific(X), m_SpecificInt(HalfBits)), + m_CombineOr(m_Specific(Y), m_Specific(YLo)))))) + return BinaryOperator::CreateMul(X, Y); + + return nullptr; +} + Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) { if (Value *V = simplifyAddInst(I.getOperand(0), I.getOperand(1),
I.hasNoSignedWrap(), I.hasNoUnsignedWrap(), @@ -1286,6 +1330,9 @@ if (Value *V = SimplifyUsingDistributiveLaws(I)) return replaceInstUsesWith(I, V); + if (Instruction *R = foldBoxMultiply(I)) + return R; + if (Instruction *R = factorizeMathWithShlOps(I, Builder)) return R; Index: llvm/test/Transforms/InstCombine/mul_fold.ll =================================================================== --- llvm/test/Transforms/InstCombine/mul_fold.ll +++ llvm/test/Transforms/InstCombine/mul_fold.ll @@ -23,11 +23,9 @@ ; CHECK-NEXT: call void @use8(i8 [[M10]]) ; CHECK-NEXT: [[M01:%.*]] = mul nuw i8 [[IN1LO]], [[IN0HI]] ; CHECK-NEXT: call void @use8(i8 [[M01]]) -; CHECK-NEXT: [[M00:%.*]] = mul nuw i8 [[IN1LO]], [[IN0LO]] ; CHECK-NEXT: [[ADDC:%.*]] = add i8 [[M10]], [[M01]] ; CHECK-NEXT: call void @use8(i8 [[ADDC]]) -; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[ADDC]], 4 -; CHECK-NEXT: [[RETLO:%.*]] = add i8 [[SHL]], [[M00]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i8 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i8 [[RETLO]] ; %In0Lo = and i8 %in0, 15 @@ -56,11 +54,9 @@ ; CHECK-NEXT: call void @use8(i8 [[M10]]) ; CHECK-NEXT: [[M01:%.*]] = mul nuw i8 [[IN1LO]], [[IN0HI]] ; CHECK-NEXT: call void @use8(i8 [[M01]]) -; CHECK-NEXT: [[M00:%.*]] = mul nuw i8 [[IN1LO]], [[IN0LO]] ; CHECK-NEXT: [[ADDC:%.*]] = add i8 [[M10]], [[M01]] ; CHECK-NEXT: call void @use8(i8 [[ADDC]]) -; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[ADDC]], 4 -; CHECK-NEXT: [[RETLO:%.*]] = add i8 [[M00]], [[SHL]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i8 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i8 [[RETLO]] ; %In0Lo = and i8 %in0, 15 @@ -89,11 +85,9 @@ ; CHECK-NEXT: call void @use8(i8 [[M10]]) ; CHECK-NEXT: [[M01:%.*]] = mul nuw i8 [[IN1LO]], [[IN0HI]] ; CHECK-NEXT: call void @use8(i8 [[M01]]) -; CHECK-NEXT: [[M00:%.*]] = mul nuw i8 [[IN1LO]], [[IN0LO]] ; CHECK-NEXT: [[ADDC:%.*]] = add i8 [[M01]], [[M10]] ; CHECK-NEXT: call void @use8(i8 [[ADDC]]) -; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[ADDC]], 4 -; CHECK-NEXT: [[RETLO:%.*]] = add i8 [[SHL]], [[M00]] +; 
CHECK-NEXT: [[RETLO:%.*]] = mul i8 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i8 [[RETLO]] ; %In0Lo = and i8 %in0, 15 @@ -122,11 +116,9 @@ ; CHECK-NEXT: call void @use8(i8 [[M10]]) ; CHECK-NEXT: [[M01:%.*]] = mul nuw i8 [[IN1LO]], [[IN0HI]] ; CHECK-NEXT: call void @use8(i8 [[M01]]) -; CHECK-NEXT: [[M00:%.*]] = mul nuw i8 [[IN1LO]], [[IN0LO]] ; CHECK-NEXT: [[ADDC:%.*]] = add i8 [[M01]], [[M10]] ; CHECK-NEXT: call void @use8(i8 [[ADDC]]) -; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[ADDC]], 4 -; CHECK-NEXT: [[RETLO:%.*]] = add i8 [[M00]], [[SHL]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i8 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i8 [[RETLO]] ; %In0Lo = and i8 %in0, 15 @@ -155,11 +147,9 @@ ; CHECK-NEXT: call void @use16(i16 [[M10]]) ; CHECK-NEXT: [[M01:%.*]] = mul nuw i16 [[IN1LO]], [[IN0HI]] ; CHECK-NEXT: call void @use16(i16 [[M01]]) -; CHECK-NEXT: [[M00:%.*]] = mul nuw i16 [[IN1LO]], [[IN0LO]] ; CHECK-NEXT: [[ADDC:%.*]] = add i16 [[M10]], [[M01]] ; CHECK-NEXT: call void @use16(i16 [[ADDC]]) -; CHECK-NEXT: [[SHL:%.*]] = shl i16 [[ADDC]], 8 -; CHECK-NEXT: [[RETLO:%.*]] = add i16 [[SHL]], [[M00]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i16 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i16 [[RETLO]] ; %In0Lo = and i16 %in0, 255 @@ -188,11 +178,9 @@ ; CHECK-NEXT: call void @use16(i16 [[M10]]) ; CHECK-NEXT: [[M01:%.*]] = mul nuw i16 [[IN1LO]], [[IN0HI]] ; CHECK-NEXT: call void @use16(i16 [[M01]]) -; CHECK-NEXT: [[M00:%.*]] = mul nuw i16 [[IN1LO]], [[IN0LO]] ; CHECK-NEXT: [[ADDC:%.*]] = add i16 [[M10]], [[M01]] ; CHECK-NEXT: call void @use16(i16 [[ADDC]]) -; CHECK-NEXT: [[SHL:%.*]] = shl i16 [[ADDC]], 8 -; CHECK-NEXT: [[RETLO:%.*]] = add i16 [[M00]], [[SHL]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i16 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i16 [[RETLO]] ; %In0Lo = and i16 %in0, 255 @@ -221,11 +209,9 @@ ; CHECK-NEXT: call void @use16(i16 [[M10]]) ; CHECK-NEXT: [[M01:%.*]] = mul nuw i16 [[IN1LO]], [[IN0HI]] ; CHECK-NEXT: call void @use16(i16 [[M01]]) -; CHECK-NEXT: [[M00:%.*]] = mul nuw i16 [[IN1LO]], [[IN0LO]] ; CHECK-NEXT: 
[[ADDC:%.*]] = add i16 [[M01]], [[M10]] ; CHECK-NEXT: call void @use16(i16 [[ADDC]]) -; CHECK-NEXT: [[SHL:%.*]] = shl i16 [[ADDC]], 8 -; CHECK-NEXT: [[RETLO:%.*]] = add i16 [[SHL]], [[M00]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i16 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i16 [[RETLO]] ; %In0Lo = and i16 %in0, 255 @@ -254,11 +240,9 @@ ; CHECK-NEXT: call void @use16(i16 [[M10]]) ; CHECK-NEXT: [[M01:%.*]] = mul nuw i16 [[IN1LO]], [[IN0HI]] ; CHECK-NEXT: call void @use16(i16 [[M01]]) -; CHECK-NEXT: [[M00:%.*]] = mul nuw i16 [[IN1LO]], [[IN0LO]] ; CHECK-NEXT: [[ADDC:%.*]] = add i16 [[M01]], [[M10]] ; CHECK-NEXT: call void @use16(i16 [[ADDC]]) -; CHECK-NEXT: [[SHL:%.*]] = shl i16 [[ADDC]], 8 -; CHECK-NEXT: [[RETLO:%.*]] = add i16 [[M00]], [[SHL]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i16 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i16 [[RETLO]] ; %In0Lo = and i16 %in0, 255 @@ -285,11 +269,9 @@ ; CHECK-NEXT: [[IN1HI:%.*]] = lshr i32 [[IN1]], 16 ; CHECK-NEXT: [[M10:%.*]] = mul nuw i32 [[IN1HI]], [[IN0LO]] ; CHECK-NEXT: [[M01:%.*]] = mul nuw i32 [[IN0HI]], [[IN1LO]] -; CHECK-NEXT: [[M00:%.*]] = mul nuw i32 [[IN1LO]], [[IN0LO]] ; CHECK-NEXT: [[ADDC:%.*]] = add i32 [[M10]], [[M01]] ; CHECK-NEXT: call void @use32(i32 [[ADDC]]) -; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[ADDC]], 16 -; CHECK-NEXT: [[RETLO:%.*]] = add i32 [[SHL]], [[M00]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i32 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i32 [[RETLO]] ; %In0Lo = and i32 %in0, 65535 @@ -314,11 +296,9 @@ ; CHECK-NEXT: [[IN1HI:%.*]] = lshr i32 [[IN1]], 16 ; CHECK-NEXT: [[M10:%.*]] = mul nuw i32 [[IN1HI]], [[IN0LO]] ; CHECK-NEXT: [[M01:%.*]] = mul nuw i32 [[IN0HI]], [[IN1LO]] -; CHECK-NEXT: [[M00:%.*]] = mul nuw i32 [[IN1LO]], [[IN0LO]] ; CHECK-NEXT: [[ADDC:%.*]] = add i32 [[M10]], [[M01]] ; CHECK-NEXT: call void @use32(i32 [[ADDC]]) -; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[ADDC]], 16 -; CHECK-NEXT: [[RETLO:%.*]] = add i32 [[M00]], [[SHL]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i32 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i32 [[RETLO]] ; %In0Lo = 
and i32 %in0, 65535 @@ -343,11 +323,9 @@ ; CHECK-NEXT: [[IN1HI:%.*]] = lshr i32 [[IN1]], 16 ; CHECK-NEXT: [[M10:%.*]] = mul nuw i32 [[IN1HI]], [[IN0LO]] ; CHECK-NEXT: [[M01:%.*]] = mul nuw i32 [[IN0HI]], [[IN1LO]] -; CHECK-NEXT: [[M00:%.*]] = mul nuw i32 [[IN1LO]], [[IN0LO]] ; CHECK-NEXT: [[ADDC:%.*]] = add i32 [[M01]], [[M10]] ; CHECK-NEXT: call void @use32(i32 [[ADDC]]) -; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[ADDC]], 16 -; CHECK-NEXT: [[RETLO:%.*]] = add i32 [[SHL]], [[M00]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i32 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i32 [[RETLO]] ; %In0Lo = and i32 %in0, 65535 @@ -372,11 +350,9 @@ ; CHECK-NEXT: [[IN1HI:%.*]] = lshr i32 [[IN1]], 16 ; CHECK-NEXT: [[M10:%.*]] = mul nuw i32 [[IN1HI]], [[IN0LO]] ; CHECK-NEXT: [[M01:%.*]] = mul nuw i32 [[IN0HI]], [[IN1LO]] -; CHECK-NEXT: [[M00:%.*]] = mul nuw i32 [[IN1LO]], [[IN0LO]] ; CHECK-NEXT: [[ADDC:%.*]] = add i32 [[M01]], [[M10]] ; CHECK-NEXT: call void @use32(i32 [[ADDC]]) -; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[ADDC]], 16 -; CHECK-NEXT: [[RETLO:%.*]] = add i32 [[M00]], [[SHL]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i32 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i32 [[RETLO]] ; %In0Lo = and i32 %in0, 65535 @@ -401,11 +377,9 @@ ; CHECK-NEXT: [[IN1HI:%.*]] = lshr i64 [[IN1]], 32 ; CHECK-NEXT: [[M10:%.*]] = mul nuw i64 [[IN0LO]], [[IN1HI]] ; CHECK-NEXT: [[M01:%.*]] = mul nuw i64 [[IN0HI]], [[IN1LO]] -; CHECK-NEXT: [[M00:%.*]] = mul nuw i64 [[IN1LO]], [[IN0LO]] ; CHECK-NEXT: [[ADDC:%.*]] = add i64 [[M10]], [[M01]] ; CHECK-NEXT: call void @use64(i64 [[ADDC]]) -; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[ADDC]], 32 -; CHECK-NEXT: [[RETLO:%.*]] = add i64 [[SHL]], [[M00]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i64 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i64 [[RETLO]] ; %In0Lo = and i64 %in0, 4294967295 @@ -430,11 +404,9 @@ ; CHECK-NEXT: [[IN1HI:%.*]] = lshr i64 [[IN1]], 32 ; CHECK-NEXT: [[M10:%.*]] = mul nuw i64 [[IN0LO]], [[IN1HI]] ; CHECK-NEXT: [[M01:%.*]] = mul nuw i64 [[IN0HI]], [[IN1LO]] -; CHECK-NEXT: [[M00:%.*]] = mul nuw i64 
[[IN1LO]], [[IN0LO]] ; CHECK-NEXT: [[ADDC:%.*]] = add i64 [[M10]], [[M01]] ; CHECK-NEXT: call void @use64(i64 [[ADDC]]) -; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[ADDC]], 32 -; CHECK-NEXT: [[RETLO:%.*]] = add i64 [[M00]], [[SHL]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i64 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i64 [[RETLO]] ; %In0Lo = and i64 %in0, 4294967295 @@ -459,11 +431,9 @@ ; CHECK-NEXT: [[IN1HI:%.*]] = lshr i64 [[IN1]], 32 ; CHECK-NEXT: [[M10:%.*]] = mul nuw i64 [[IN0LO]], [[IN1HI]] ; CHECK-NEXT: [[M01:%.*]] = mul nuw i64 [[IN0HI]], [[IN1LO]] -; CHECK-NEXT: [[M00:%.*]] = mul nuw i64 [[IN1LO]], [[IN0LO]] ; CHECK-NEXT: [[ADDC:%.*]] = add i64 [[M01]], [[M10]] ; CHECK-NEXT: call void @use64(i64 [[ADDC]]) -; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[ADDC]], 32 -; CHECK-NEXT: [[RETLO:%.*]] = add i64 [[SHL]], [[M00]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i64 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i64 [[RETLO]] ; %In0Lo = and i64 %in0, 4294967295 @@ -488,11 +458,9 @@ ; CHECK-NEXT: [[IN1HI:%.*]] = lshr i64 [[IN1]], 32 ; CHECK-NEXT: [[M10:%.*]] = mul nuw i64 [[IN0LO]], [[IN1HI]] ; CHECK-NEXT: [[M01:%.*]] = mul nuw i64 [[IN0HI]], [[IN1LO]] -; CHECK-NEXT: [[M00:%.*]] = mul nuw i64 [[IN1LO]], [[IN0LO]] ; CHECK-NEXT: [[ADDC:%.*]] = add i64 [[M01]], [[M10]] ; CHECK-NEXT: call void @use64(i64 [[ADDC]]) -; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[ADDC]], 32 -; CHECK-NEXT: [[RETLO:%.*]] = add i64 [[M00]], [[SHL]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i64 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i64 [[RETLO]] ; %In0Lo = and i64 %in0, 4294967295 @@ -517,11 +485,9 @@ ; CHECK-NEXT: [[IN1HI:%.*]] = lshr i32 [[IN1]], 16 ; CHECK-NEXT: [[M10:%.*]] = mul nuw i32 [[IN1HI]], [[IN0LO]] ; CHECK-NEXT: [[M01:%.*]] = mul nuw i32 [[IN1LO]], [[IN0HI]] -; CHECK-NEXT: [[M00:%.*]] = mul nuw i32 [[IN1LO]], [[IN0LO]] ; CHECK-NEXT: [[ADDC:%.*]] = add i32 [[M10]], [[M01]] ; CHECK-NEXT: call void @use32(i32 [[ADDC]]) -; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[ADDC]], 16 -; CHECK-NEXT: [[RETLO:%.*]] = add i32 [[SHL]], [[M00]] +; CHECK-NEXT: 
[[RETLO:%.*]] = mul i32 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i32 [[RETLO]] ; %In0Lo = and i32 %in0, 65535 @@ -542,16 +508,7 @@ ; https://alive2.llvm.org/ce/z/2BqKLt define i8 @mul8_low(i8 %in0, i8 %in1) { ; CHECK-LABEL: @mul8_low( -; CHECK-NEXT: [[IN0LO:%.*]] = and i8 [[IN0:%.*]], 15 -; CHECK-NEXT: [[IN0HI:%.*]] = lshr i8 [[IN0]], 4 -; CHECK-NEXT: [[IN1LO:%.*]] = and i8 [[IN1:%.*]], 15 -; CHECK-NEXT: [[IN1HI:%.*]] = lshr i8 [[IN1]], 4 -; CHECK-NEXT: [[M10:%.*]] = mul i8 [[IN1HI]], [[IN0]] -; CHECK-NEXT: [[M01:%.*]] = mul i8 [[IN0HI]], [[IN1]] -; CHECK-NEXT: [[M00:%.*]] = mul nuw i8 [[IN1LO]], [[IN0LO]] -; CHECK-NEXT: [[ADDC:%.*]] = add i8 [[M10]], [[M01]] -; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[ADDC]], 4 -; CHECK-NEXT: [[RETLO:%.*]] = add i8 [[SHL]], [[M00]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i8 [[IN0:%.*]], [[IN1:%.*]] ; CHECK-NEXT: ret i8 [[RETLO]] ; %In0Lo = and i8 %in0, 15 @@ -569,16 +526,7 @@ define i16 @mul16_low(i16 %in0, i16 %in1) { ; CHECK-LABEL: @mul16_low( -; CHECK-NEXT: [[IN0LO:%.*]] = and i16 [[IN0:%.*]], 255 -; CHECK-NEXT: [[IN0HI:%.*]] = lshr i16 [[IN0]], 8 -; CHECK-NEXT: [[IN1LO:%.*]] = and i16 [[IN1:%.*]], 255 -; CHECK-NEXT: [[IN1HI:%.*]] = lshr i16 [[IN1]], 8 -; CHECK-NEXT: [[M10:%.*]] = mul i16 [[IN1HI]], [[IN0]] -; CHECK-NEXT: [[M01:%.*]] = mul i16 [[IN0HI]], [[IN1]] -; CHECK-NEXT: [[M00:%.*]] = mul nuw i16 [[IN1LO]], [[IN0LO]] -; CHECK-NEXT: [[ADDC:%.*]] = add i16 [[M10]], [[M01]] -; CHECK-NEXT: [[SHL:%.*]] = shl i16 [[ADDC]], 8 -; CHECK-NEXT: [[RETLO:%.*]] = add i16 [[SHL]], [[M00]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i16 [[IN0:%.*]], [[IN1:%.*]] ; CHECK-NEXT: ret i16 [[RETLO]] ; %In0Lo = and i16 %in0, 255 @@ -596,16 +544,7 @@ define i32 @mul32_low(i32 %in0, i32 %in1) { ; CHECK-LABEL: @mul32_low( -; CHECK-NEXT: [[IN0LO:%.*]] = and i32 [[IN0:%.*]], 65535 -; CHECK-NEXT: [[IN0HI:%.*]] = lshr i32 [[IN0]], 16 -; CHECK-NEXT: [[IN1LO:%.*]] = and i32 [[IN1:%.*]], 65535 -; CHECK-NEXT: [[IN1HI:%.*]] = lshr i32 [[IN1]], 16 -; CHECK-NEXT: [[M10:%.*]] = mul i32 
[[IN1HI]], [[IN0]] -; CHECK-NEXT: [[M01:%.*]] = mul i32 [[IN0HI]], [[IN1]] -; CHECK-NEXT: [[M00:%.*]] = mul nuw i32 [[IN1LO]], [[IN0LO]] -; CHECK-NEXT: [[ADDC:%.*]] = add i32 [[M10]], [[M01]] -; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[ADDC]], 16 -; CHECK-NEXT: [[RETLO:%.*]] = add i32 [[SHL]], [[M00]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i32 [[IN0:%.*]], [[IN1:%.*]] ; CHECK-NEXT: ret i32 [[RETLO]] ; %In0Lo = and i32 %in0, 65535 @@ -623,16 +562,7 @@ define i64 @mul64_low(i64 %in0, i64 %in1) { ; CHECK-LABEL: @mul64_low( -; CHECK-NEXT: [[IN0LO:%.*]] = and i64 [[IN0:%.*]], 4294967295 -; CHECK-NEXT: [[IN0HI:%.*]] = lshr i64 [[IN0]], 32 -; CHECK-NEXT: [[IN1LO:%.*]] = and i64 [[IN1:%.*]], 4294967295 -; CHECK-NEXT: [[IN1HI:%.*]] = lshr i64 [[IN1]], 32 -; CHECK-NEXT: [[M10:%.*]] = mul i64 [[IN1HI]], [[IN0]] -; CHECK-NEXT: [[M01:%.*]] = mul i64 [[IN0HI]], [[IN1]] -; CHECK-NEXT: [[M00:%.*]] = mul nuw i64 [[IN1LO]], [[IN0LO]] -; CHECK-NEXT: [[ADDC:%.*]] = add i64 [[M10]], [[M01]] -; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[ADDC]], 32 -; CHECK-NEXT: [[RETLO:%.*]] = add i64 [[SHL]], [[M00]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i64 [[IN0:%.*]], [[IN1:%.*]] ; CHECK-NEXT: ret i64 [[RETLO]] ; %In0Lo = and i64 %in0, 4294967295 @@ -650,16 +580,7 @@ define i128 @mul128_low(i128 %in0, i128 %in1) { ; CHECK-LABEL: @mul128_low( -; CHECK-NEXT: [[IN0LO:%.*]] = and i128 [[IN0:%.*]], 18446744073709551615 -; CHECK-NEXT: [[IN0HI:%.*]] = lshr i128 [[IN0]], 64 -; CHECK-NEXT: [[IN1LO:%.*]] = and i128 [[IN1:%.*]], 18446744073709551615 -; CHECK-NEXT: [[IN1HI:%.*]] = lshr i128 [[IN1]], 64 -; CHECK-NEXT: [[M10:%.*]] = mul i128 [[IN1HI]], [[IN0]] -; CHECK-NEXT: [[M01:%.*]] = mul i128 [[IN0HI]], [[IN1]] -; CHECK-NEXT: [[M00:%.*]] = mul nuw i128 [[IN1LO]], [[IN0LO]] -; CHECK-NEXT: [[ADDC:%.*]] = add i128 [[M10]], [[M01]] -; CHECK-NEXT: [[SHL:%.*]] = shl i128 [[ADDC]], 64 -; CHECK-NEXT: [[RETLO:%.*]] = add i128 [[SHL]], [[M00]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i128 [[IN0:%.*]], [[IN1:%.*]] ; CHECK-NEXT: ret i128 
[[RETLO]] ; %In0Lo = and i128 %in0, 18446744073709551615