diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1270,13 +1270,9 @@
 /// Reduce a sequence of masked half-width multiplies to a single multiply.
 /// ((XLow * YHigh) + (YLow * XHigh)) << HalfBits) + (XLow * YLow) --> X * Y
 static Instruction *foldBoxMultiply(BinaryOperator &I) {
-  if (!I.getType()->isIntegerTy())
-    return nullptr;
-
   unsigned BitWidth = I.getType()->getScalarSizeInBits();
-  // Skip the odd bitwidth types and large bitwidth types
-  // TODO: Relax the constraint of wide/vectors types.
-  if ((BitWidth & 0x1) || (BitWidth > 128))
+  // Skip the odd bitwidth types.
+  if ((BitWidth & 0x1))
     return nullptr;
 
   unsigned HalfBits = BitWidth >> 1;
diff --git a/llvm/test/Transforms/InstCombine/mul_fold.ll b/llvm/test/Transforms/InstCombine/mul_fold.ll
--- a/llvm/test/Transforms/InstCombine/mul_fold.ll
+++ b/llvm/test/Transforms/InstCombine/mul_fold.ll
@@ -521,19 +521,10 @@
   ret i128 %retLo
 }
 
-; TODO: Skip vector type
+; Support vector type
 define <2 x i8> @mul_v2i8_low(<2 x i8> %in0, <2 x i8> %in1) {
 ; CHECK-LABEL: @mul_v2i8_low(
-; CHECK-NEXT:    [[IN0LO:%.*]] = and <2 x i8> [[IN0:%.*]], <i8 15, i8 15>
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr <2 x i8> [[IN0]], <i8 4, i8 4>
-; CHECK-NEXT:    [[IN1LO:%.*]] = and <2 x i8> [[IN1:%.*]], <i8 15, i8 15>
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr <2 x i8> [[IN1]], <i8 4, i8 4>
-; CHECK-NEXT:    [[M10:%.*]] = mul <2 x i8> [[IN1HI]], [[IN0]]
-; CHECK-NEXT:    [[M01:%.*]] = mul <2 x i8> [[IN0HI]], [[IN1]]
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw <2 x i8> [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add <2 x i8> [[M10]], [[M01]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl <2 x i8> [[ADDC]], <i8 4, i8 4>
-; CHECK-NEXT:    [[RETLO:%.*]] = add <2 x i8> [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul <2 x i8> [[IN0:%.*]], [[IN1:%.*]]
 ; CHECK-NEXT:    ret <2 x i8> [[RETLO]]
 ;
   %In0Lo = and <2 x i8> %in0, <i8 15, i8 15>
@@ -551,17 +542,11 @@
 
 define <2 x i8> @mul_v2i8_low_one_extra_user(<2 x i8> %in0, <2 x i8> %in1) {
 ; CHECK-LABEL: @mul_v2i8_low_one_extra_user(
-; CHECK-NEXT:    [[IN0LO:%.*]] = and <2 x i8> [[IN0:%.*]], <i8 15, i8 15>
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr <2 x i8> [[IN0]], <i8 4, i8 4>
+; CHECK-NEXT:    [[IN0HI:%.*]] = lshr <2 x i8> [[IN0:%.*]], <i8 4, i8 4>
 ; CHECK-NEXT:    [[IN1LO:%.*]] = and <2 x i8> [[IN1:%.*]], <i8 15, i8 15>
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr <2 x i8> [[IN1]], <i8 4, i8 4>
-; CHECK-NEXT:    [[M10:%.*]] = mul <2 x i8> [[IN1HI]], [[IN0]]
 ; CHECK-NEXT:    [[M01:%.*]] = mul nuw <2 x i8> [[IN1LO]], [[IN0HI]]
 ; CHECK-NEXT:    call void @use_v2i8(<2 x i8> [[M01]])
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw <2 x i8> [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add <2 x i8> [[M10]], [[M01]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl <2 x i8> [[ADDC]], <i8 4, i8 4>
-; CHECK-NEXT:    [[RETLO:%.*]] = add <2 x i8> [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul <2 x i8> [[IN0]], [[IN1]]
 ; CHECK-NEXT:    ret <2 x i8> [[RETLO]]
 ;
   %In0Lo = and <2 x i8> %in0, <i8 15, i8 15>
@@ -578,19 +563,10 @@
   ret <2 x i8> %retLo
 }
 
-; TODO: Support wide width
+; Support wide width
 define i130 @mul130_low(i130 %in0, i130 %in1) {
 ; CHECK-LABEL: @mul130_low(
-; CHECK-NEXT:    [[IN0LO:%.*]] = and i130 [[IN0:%.*]], 36893488147419103231
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i130 [[IN0]], 65
-; CHECK-NEXT:    [[IN1LO:%.*]] = and i130 [[IN1:%.*]], 36893488147419103231
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i130 [[IN1]], 65
-; CHECK-NEXT:    [[M10:%.*]] = mul i130 [[IN1HI]], [[IN0]]
-; CHECK-NEXT:    [[M01:%.*]] = mul i130 [[IN0HI]], [[IN1]]
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i130 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i130 [[M10]], [[M01]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i130 [[ADDC]], 65
-; CHECK-NEXT:    [[RETLO:%.*]] = add i130 [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i130 [[IN0:%.*]], [[IN1:%.*]]
 ; CHECK-NEXT:    ret i130 [[RETLO]]
 ;
   %In0Lo = and i130 %in0, 36893488147419103231
@@ -609,16 +585,10 @@
 define i130 @mul130_low_one_extra_user(i130 %in0, i130 %in1) {
 ; CHECK-LABEL: @mul130_low_one_extra_user(
 ; CHECK-NEXT:    [[IN0LO:%.*]] = and i130 [[IN0:%.*]], 36893488147419103231
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i130 [[IN0]], 65
-; CHECK-NEXT:    [[IN1LO:%.*]] = and i130 [[IN1:%.*]], 36893488147419103231
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i130 [[IN1]], 65
+; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i130 [[IN1:%.*]], 65
 ; CHECK-NEXT:    [[M10:%.*]] = mul nuw i130 [[IN1HI]], [[IN0LO]]
 ; CHECK-NEXT:    call void @use130(i130 [[M10]])
-; CHECK-NEXT:    [[M01:%.*]] = mul i130 [[IN0HI]], [[IN1]]
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i130 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i130 [[M10]], [[M01]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i130 [[ADDC]], 65
-; CHECK-NEXT:    [[RETLO:%.*]] = add i130 [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i130 [[IN0]], [[IN1]]
 ; CHECK-NEXT:    ret i130 [[RETLO]]
 ;
   %In0Lo = and i130 %in0, 36893488147419103231
diff --git a/llvm/test/Transforms/InstCombine/mul_full_64.ll b/llvm/test/Transforms/InstCombine/mul_full_64.ll
--- a/llvm/test/Transforms/InstCombine/mul_full_64.ll
+++ b/llvm/test/Transforms/InstCombine/mul_full_64.ll
@@ -448,7 +448,7 @@
   ret i64 %hi
 }
 
-
+; TODO: https://alive2.llvm.org/ce/z/y26zaW
 define i64 @mullo(i64 %x, i64 %y) {
 ; CHECK-LABEL: @mullo(
 ; CHECK-NEXT:    [[XL:%.*]] = and i64 [[X:%.*]], 4294967295