Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp =================================================================== --- lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -544,6 +544,14 @@ /// single-use) and if missed an empty SDValue is returned. SDValue distributeTruncateThroughAnd(SDNode *N); + /// \brief Try to transform a multiplication of the shape: + /// (mul x, (2^N + 1)) => (add (shl x, N), x) + /// (mul x, (2^N - 1)) => (sub (shl x, N), x) + /// (mul x, -(2^N - 1)) => (sub x, (shl x, N)) + /// (mul x, -(2^N + 1)) => -(add (shl x, N), x) + /// (mul x, (2^N + 1) * 2^M) => (shl (add (shl x, N), x), M) + SDValue TransformMulWithPow2DisplacedBy1(SDNode *N); + public: /// Runs the dag combiner on all nodes in the work list void Run(CombineLevel AtLevel); @@ -2605,6 +2613,109 @@ return SDValue(); } +SDValue DAGCombiner::TransformMulWithPow2DisplacedBy1(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); + EVT VT = N0.getValueType(); + // Perform the transformation only for legal types to + // avoid problems with backends like Hexagon. + + // FIXME: There is a possible regression in x86. + // lea-3.ll test fails because RDI is used instead of RCX. + // So we do nothing for non-vector types for now. + if (!TLI.isTypeLegal(VT) || !VT.isVector()) + return SDValue(); + + // AllArePow2 holds the number of lanes which are: + // 1) constant + // 2) of value (2^C) +/- 1 + // 3) of the same sign as the first lane + // AllArePow2 > 0 indicates that constants are (2^C) - 1 + // AllArePow2 < 0 indicates that constants are (2^C) + 1 + int AllArePow2 = 0; + // SignDirection indicates that constants are: + // positive if SignDirection > 0 + // negative if SignDirection < 0 + int SignDirection = 0; + // TrailingZeroes holds the number of trailing zeros (M) if + // constants are +/-(2^N +/- 1) * 2^M + unsigned TrailingZeroes = 0; + bool TrailingZeroesSet = false; + bool Match = matchUnaryPredicate(N1, [&](ConstantSDNode *C) { + const APInt &ConstantValue = C->getAPIntValue(); + if (ConstantValue == 0) + return false; + if (!TrailingZeroesSet) { + TrailingZeroes = ConstantValue.countTrailingZeros(); + TrailingZeroesSet = true; + } else if (TrailingZeroes != ConstantValue.countTrailingZeros()) { + // All constants in the vector must agree on the number of trailing zeros. + return false; + } + + APInt Plus1 = ConstantValue.ashr(TrailingZeroes).abs() + 1; + APInt Minus1 = ConstantValue.ashr(TrailingZeroes).abs() - 1; + + int IsPow2 = Plus1.isPowerOf2() ? 1 : Minus1.isPowerOf2() ? -1 : 0; + if (!SignDirection) + SignDirection = ConstantValue.isNonNegative() ? 1 : -1; + // Avoid creating poison through shift amounts >= bitwidth. + if (IsPow2 && VT.getScalarSizeInBits() > (ConstantValue + IsPow2).logBase2()) { + // Only match values whose sign agrees with the first lane. + if (ConstantValue.isNegative() == (SignDirection < 0)) { + AllArePow2 += IsPow2; + return true; + } + } + return false; + }); + + if (!Match || static_cast<unsigned>(std::abs(AllArePow2)) != (VT.isVector() ? VT.getVectorNumElements() : 1)) + return SDValue(); + + SDLoc DL(N); + SDValue Const0 = DAG.getConstant(0, DL, VT); + + if (SignDirection < 0) { + // Clear the sign bits of the constant vector. + N1 = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, + Const0.getNode(), + N1.getNode()); + } + if (TrailingZeroes) { + N1 = DAG.FoldConstantArithmetic(ISD::SRA, DL, VT, + N1.getNode(), + DAG.getConstant(TrailingZeroes, DL, VT).getNode()); + } + + SDValue LogBase2 = BuildLogBase2( + DAG.FoldConstantArithmetic(ISD::ADD, DL, VT, N1.getNode(), + DAG.getConstant((AllArePow2 > 0 ?
1 : -1), DL, VT).getNode()), DL); + AddToWorklist(LogBase2.getNode()); + + SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, N0, LogBase2); + Shl.getNode()->setFlags(N->getFlags()); + AddToWorklist(Shl.getNode()); + + SDValue LHS = Shl, RHS = N0; + if (SignDirection < 0) + std::swap(LHS, RHS); + + SDValue Res = DAG.getNode(AllArePow2 > 0 ? ISD::SUB : ISD::ADD, DL, VT, LHS, RHS); + + if (SignDirection < 0 && AllArePow2 < 0) { + AddToWorklist(Res.getNode()); + Res = DAG.getNode(ISD::SUB, DL, VT, Const0, Res); + } + if (TrailingZeroes) { + AddToWorklist(Res.getNode()); + return DAG.getNode(ISD::SHL, DL, VT, Res, + DAG.getConstant(TrailingZeroes, DL, VT)); + } + + return Res; +} + SDValue DAGCombiner::visitMUL(SDNode *N) { SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); @@ -2694,7 +2805,11 @@ DAG.getConstant(Log2Val, DL, getShiftAmountTy(N0.getValueType())))); } - + // Transform (mul X, +/-(1 << c) +/- 1) to appropriate shift patterns. + if (isConstantOrConstantVector(N1)) { + if (SDValue Res = TransformMulWithPow2DisplacedBy1(N)) + return Res; + } // (mul (shl X, c1), c2) -> (mul X, c2 << c1) if (N0.getOpcode() == ISD::SHL && isConstantOrConstantVector(N1, /* NoOpaques */ true) && Index: test/CodeGen/AArch64/aarch64-smull.ll =================================================================== --- test/CodeGen/AArch64/aarch64-smull.ll +++ test/CodeGen/AArch64/aarch64-smull.ll @@ -227,7 +227,7 @@ ; CHECK-LABEL: smull_extvec_v8i8_v8i16: ; CHECK: smull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b %tmp3 = sext <8 x i8> %arg to <8 x i16> - %tmp4 = mul <8 x i16> %tmp3, + %tmp4 = mul <8 x i16> %tmp3, ret <8 x i16> %tmp4 } @@ -245,7 +245,7 @@ ; CHECK-LABEL: smull_extvec_v4i16_v4i32: ; CHECK: smull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h %tmp3 = sext <4 x i16> %arg to <4 x i32> - %tmp4 = mul <4 x i32> %tmp3, + %tmp4 = mul <4 x i32> %tmp3, ret <4 x i32> %tmp4 } @@ -261,7 +261,7 @@ ; CHECK-LABEL: umull_extvec_v8i8_v8i16: ; CHECK: umull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b %tmp3 = zext <8 x i8> %arg to <8 x i16> - %tmp4 = mul <8 x i16> %tmp3, + %tmp4 = mul <8 x i16> %tmp3, ret <8 x i16> %tmp4 } @@ -297,7 +297,7 @@ ; CHECK-LABEL: smullWithInconsistentExtensions: ; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h %1 = sext <8 x i8> %vec to <8 x i16> - %2 = mul <8 x i16> %1, + %2 = mul <8 x i16> %1, %3 = extractelement <8 x i16> %2, i32 0 ret i16 %3 } Index: test/CodeGen/ARM/2012-08-23-legalize-vmull.ll =================================================================== --- test/CodeGen/ARM/2012-08-23-legalize-vmull.ll +++ test/CodeGen/ARM/2012-08-23-legalize-vmull.ll @@ -18,7 +18,7 @@ %0 = load <4 x i8>, <4 x i8>* %v, align 8 %v0 = sext <4 x i8> %0 to <4 x i32> ;CHECK: vmull - %v1 = mul <4 x i32> %v0, + %v1 = mul <4 x i32> %v0, store <4 x i32> %v1, <4 x i32>* undef, align 8 ret void; } @@ -31,7 +31,7 @@ %0 = load <2 x i8>, <2 x i8>* %v, align 8 %v0 = sext <2 x i8> %0 to <2 x i64> ;CHECK: vmull - %v1 = mul <2 x i64> %v0, + %v1 = mul <2 x i64> %v0, store <2 x i64> %v1, <2 x i64>* undef, align 8 ret void; } @@ -44,7 +44,7 @@ %0 = load <2 x i16>, <2 x i16>* %v, align 8 %v0 = sext <2 x i16> %0 to <2 x i64> ;CHECK: vmull - %v1 = mul <2 x i64> %v0, + %v1 = mul <2 x i64> %v0, store <2 x i64> %v1, <2 x i64>* undef, align 8 ret void; } Index: test/CodeGen/ARM/vmul.ll =================================================================== --- test/CodeGen/ARM/vmul.ll +++ test/CodeGen/ARM/vmul.ll @@ -374,7 +374,7 @@ ; CHECK: vmull_extvec_s8 ; CHECK: vmull.s8 %tmp3 = sext <8 x i8> %arg to
<8 x i16> - %tmp4 = mul <8 x i16> %tmp3, + %tmp4 = mul <8 x i16> %tmp3, ret <8 x i16> %tmp4 } @@ -382,7 +382,7 @@ ; CHECK: vmull_extvec_u8 ; CHECK: vmull.u8 %tmp3 = zext <8 x i8> %arg to <8 x i16> - %tmp4 = mul <8 x i16> %tmp3, + %tmp4 = mul <8 x i16> %tmp3, ret <8 x i16> %tmp4 } @@ -410,7 +410,7 @@ ; CHECK: vmull_extvec_s16 ; CHECK: vmull.s16 %tmp3 = sext <4 x i16> %arg to <4 x i32> - %tmp4 = mul <4 x i32> %tmp3, + %tmp4 = mul <4 x i32> %tmp3, ret <4 x i32> %tmp4 } Index: test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll =================================================================== --- test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll +++ test/CodeGen/Hexagon/vect/vect-cst-v4i32.ll @@ -1,6 +1,6 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 -disable-hsdr < %s | FileCheck %s ; This one should generate a combine with two immediates. -; CHECK: combine(#7,#7) +; CHECK: combine(#11,#11) @B = common global [400 x i32] zeroinitializer, align 8 @A = common global [400 x i32] zeroinitializer, align 8 @C = common global [400 x i32] zeroinitializer, align 8 @@ -19,7 +19,7 @@ %p_arrayidx = getelementptr [400 x i32], [400 x i32]* @B, i32 0, i32 %polly.loopiv23 %vector_ptr = bitcast i32* %p_arrayidx to <4 x i32>* %_p_vec_full = load <4 x i32>, <4 x i32>* %vector_ptr, align 8 - %mulp_vec = mul <4 x i32> %_p_vec_full, <i32 7, i32 7, i32 7, i32 7> + %mulp_vec = mul <4 x i32> %_p_vec_full, <i32 11, i32 11, i32 11, i32 11> %vector_ptr12 = bitcast i32* %p_arrayidx1 to <4 x i32>* %_p_vec_full13 = load <4 x i32>, <4 x i32>* %vector_ptr12, align 8 %addp_vec = add <4 x i32> %_p_vec_full13, %mulp_vec Index: test/CodeGen/Hexagon/vect/vect-vsplatb.ll =================================================================== --- test/CodeGen/Hexagon/vect/vect-vsplatb.ll +++ test/CodeGen/Hexagon/vect/vect-vsplatb.ll @@ -1,5 +1,5 @@ ; RUN: llc -march=hexagon -disable-hcp < %s | FileCheck %s -; Make sure we build the constant vector <7, 7, 7, 7> with a vsplatb. +; Make sure we build the constant vector <11, 11, 11, 11> with a vsplatb. ; CHECK: vsplatb @B = common global [400 x i8] zeroinitializer, align 8 @A = common global [400 x i8] zeroinitializer, align 8 @@ -19,7 +19,7 @@ %p_arrayidx = getelementptr [400 x i8], [400 x i8]* @B, i32 0, i32 %polly.loopiv25 %vector_ptr = bitcast i8* %p_arrayidx to <4 x i8>* %_p_vec_full = load <4 x i8>, <4 x i8>* %vector_ptr, align 8 - %mulp_vec = mul <4 x i8> %_p_vec_full, <i8 7, i8 7, i8 7, i8 7> + %mulp_vec = mul <4 x i8> %_p_vec_full, <i8 11, i8 11, i8 11, i8 11> %vector_ptr14 = bitcast i8* %p_arrayidx1 to <4 x i8>* %_p_vec_full15 = load <4 x i8>, <4 x i8>* %vector_ptr14, align 8 %addp_vec = add <4 x i8> %_p_vec_full15, %mulp_vec Index: test/CodeGen/Hexagon/vect/vect-vsplath.ll =================================================================== --- test/CodeGen/Hexagon/vect/vect-vsplath.ll +++ test/CodeGen/Hexagon/vect/vect-vsplath.ll @@ -1,5 +1,5 @@ ; RUN: llc -march=hexagon -disable-hcp < %s | FileCheck %s -; Make sure we build the constant vector <7, 7, 7, 7> with a vsplath. +; Make sure we build the constant vector <11, 11, 11, 11> with a vsplath.
; CHECK: vsplath @B = common global [400 x i16] zeroinitializer, align 8 @A = common global [400 x i16] zeroinitializer, align 8 @@ -19,7 +19,7 @@ %p_arrayidx = getelementptr [400 x i16], [400 x i16]* @B, i32 0, i32 %polly.loopiv26 %vector_ptr = bitcast i16* %p_arrayidx to <4 x i16>* %_p_vec_full = load <4 x i16>, <4 x i16>* %vector_ptr, align 8 - %mulp_vec = mul <4 x i16> %_p_vec_full, <i16 7, i16 7, i16 7, i16 7> + %mulp_vec = mul <4 x i16> %_p_vec_full, <i16 11, i16 11, i16 11, i16 11> %vector_ptr15 = bitcast i16* %p_arrayidx1 to <4 x i16>* %_p_vec_full16 = load <4 x i16>, <4 x i16>* %vector_ptr15, align 8 %addp_vec = add <4 x i16> %_p_vec_full16, %mulp_vec Index: test/CodeGen/X86/combine-shl.ll =================================================================== --- test/CodeGen/X86/combine-shl.ll +++ test/CodeGen/X86/combine-shl.ll @@ -581,10 +581,10 @@ ; ; AVX-LABEL: combine_vec_shl_mul0: ; AVX: # BB#0: -; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20] +; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [44,44,44,44] ; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq - %1 = mul <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5> + %1 = mul <4 x i32> %x, <i32 11, i32 11, i32 11, i32 11> %2 = shl <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2> ret <4 x i32> %2 } Index: test/CodeGen/X86/masked_gather_scatter.ll =================================================================== --- test/CodeGen/X86/masked_gather_scatter.ll +++ test/CodeGen/X86/masked_gather_scatter.ll @@ -464,7 +464,9 @@ ; KNL_64-NEXT: vpsllq $32, %zmm0, %zmm0 ; KNL_64-NEXT: vpaddq %zmm2, %zmm0, %zmm0 ; KNL_64-NEXT: vpmovsxdq %ymm1, %zmm1 -; KNL_64-NEXT: vpmuldq {{.*}}(%rip){1to8}, %zmm1, %zmm1 +; KNL_64-NEXT: vpsllq $2, %zmm1, %zmm2 +; KNL_64-NEXT: vpaddq %zmm1, %zmm2, %zmm1 +; KNL_64-NEXT: vpsllq $4, %zmm1, %zmm1 ; KNL_64-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; KNL_64-NEXT: vpaddq %zmm0, %zmm4, %zmm0 ; KNL_64-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm1 @@ -475,11 +477,12 @@ ; KNL_32-LABEL: test9: ; KNL_32: # BB#0: # %entry ; KNL_32-NEXT: vpbroadcastd {{[0-9]+}}(%esp), %ymm2 -; KNL_32-NEXT: vpbroadcastd {{.*#+}} ymm3 = [80,80,80,80,80,80,80,80] -; KNL_32-NEXT: vpmulld %ymm3, %ymm1, %ymm1 ; KNL_32-NEXT: vpmovqd %zmm0, %ymm0 ; KNL_32-NEXT: vpbroadcastd {{.*#+}} ymm3 = [820,820,820,820,820,820,820,820] ; KNL_32-NEXT: vpmulld %ymm3, %ymm0, %ymm0 +; KNL_32-NEXT: vpslld $2, %ymm1, %ymm3 +; KNL_32-NEXT: vpaddd %ymm1, %ymm3, %ymm1 +; KNL_32-NEXT: vpslld $4, %ymm1, %ymm1 ; KNL_32-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ; KNL_32-NEXT: vpbroadcastd {{.*#+}} ymm1 = [68,68,68,68,68,68,68,68] ; KNL_32-NEXT: vpaddd %ymm1, %ymm0, %ymm0 @@ -494,7 +497,9 @@ ; SKX-NEXT: vpbroadcastq %rdi, %zmm2 ; SKX-NEXT: vpmullq {{.*}}(%rip){1to8}, %zmm0, %zmm0 ; SKX-NEXT: vpmovsxdq %ymm1, %zmm1 -; SKX-NEXT: vpmullq {{.*}}(%rip){1to8}, %zmm1, %zmm1 +; SKX-NEXT: vpsllq $2, %zmm1, %zmm3 +; SKX-NEXT: vpaddq %zmm1, %zmm3, %zmm1 +; SKX-NEXT: vpsllq $4, %zmm1, %zmm1 ; SKX-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; SKX-NEXT: vpaddq %zmm0, %zmm2, %zmm0 ; SKX-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm1 @@ -504,7 +509,9 @@ ; ; SKX_32-LABEL: test9: ; SKX_32: # BB#0: # %entry -; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm1, %ymm1 +; SKX_32-NEXT: vpslld $2, %ymm1, %ymm2 +; SKX_32-NEXT: vpaddd %ymm1, %ymm2, %ymm1 +; SKX_32-NEXT: vpslld $4, %ymm1, %ymm1 ; SKX_32-NEXT: vpmovqd %zmm0, %ymm0 ; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm0, %ymm0 ; SKX_32-NEXT: vpaddd {{[0-9]+}}(%esp){1to8}, %ymm0, %ymm0 @@ -533,7 +540,9 @@ ; KNL_64-NEXT: vpsllq $32, %zmm0, %zmm0 ; KNL_64-NEXT: vpaddq %zmm2, %zmm0, %zmm0 ; KNL_64-NEXT: vpmovsxdq %ymm1, %zmm1 -; KNL_64-NEXT: vpmuldq {{.*}}(%rip){1to8}, %zmm1, %zmm1 +; KNL_64-NEXT: vpsllq $2, %zmm1, %zmm2 +;
KNL_64-NEXT: vpaddq %zmm1, %zmm2, %zmm1 +; KNL_64-NEXT: vpsllq $4, %zmm1, %zmm1 ; KNL_64-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; KNL_64-NEXT: vpaddq %zmm0, %zmm4, %zmm0 ; KNL_64-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm1 @@ -544,11 +553,12 @@ ; KNL_32-LABEL: test10: ; KNL_32: # BB#0: # %entry ; KNL_32-NEXT: vpbroadcastd {{[0-9]+}}(%esp), %ymm2 -; KNL_32-NEXT: vpbroadcastd {{.*#+}} ymm3 = [80,80,80,80,80,80,80,80] -; KNL_32-NEXT: vpmulld %ymm3, %ymm1, %ymm1 ; KNL_32-NEXT: vpmovqd %zmm0, %ymm0 ; KNL_32-NEXT: vpbroadcastd {{.*#+}} ymm3 = [820,820,820,820,820,820,820,820] ; KNL_32-NEXT: vpmulld %ymm3, %ymm0, %ymm0 +; KNL_32-NEXT: vpslld $2, %ymm1, %ymm3 +; KNL_32-NEXT: vpaddd %ymm1, %ymm3, %ymm1 +; KNL_32-NEXT: vpslld $4, %ymm1, %ymm1 ; KNL_32-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ; KNL_32-NEXT: vpbroadcastd {{.*#+}} ymm1 = [68,68,68,68,68,68,68,68] ; KNL_32-NEXT: vpaddd %ymm1, %ymm0, %ymm0 @@ -563,7 +573,9 @@ ; SKX-NEXT: vpbroadcastq %rdi, %zmm2 ; SKX-NEXT: vpmullq {{.*}}(%rip){1to8}, %zmm0, %zmm0 ; SKX-NEXT: vpmovsxdq %ymm1, %zmm1 -; SKX-NEXT: vpmullq {{.*}}(%rip){1to8}, %zmm1, %zmm1 +; SKX-NEXT: vpsllq $2, %zmm1, %zmm3 +; SKX-NEXT: vpaddq %zmm1, %zmm3, %zmm1 +; SKX-NEXT: vpsllq $4, %zmm1, %zmm1 ; SKX-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; SKX-NEXT: vpaddq %zmm0, %zmm2, %zmm0 ; SKX-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm1 @@ -573,7 +585,9 @@ ; ; SKX_32-LABEL: test10: ; SKX_32: # BB#0: # %entry -; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm1, %ymm1 +; SKX_32-NEXT: vpslld $2, %ymm1, %ymm2 +; SKX_32-NEXT: vpaddd %ymm1, %ymm2, %ymm1 +; SKX_32-NEXT: vpslld $4, %ymm1, %ymm1 ; SKX_32-NEXT: vpmovqd %zmm0, %ymm0 ; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm0, %ymm0 ; SKX_32-NEXT: vpaddd {{[0-9]+}}(%esp){1to8}, %ymm0, %ymm0 @@ -1575,8 +1589,182 @@ ; Check non-power-of-2 case. It should be scalarized. 
declare <3 x i32> @llvm.masked.gather.v3i32.v3p0i32(<3 x i32*>, i32, <3 x i1>, <3 x i32>) define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x i32> %src0) { -; ALL-LABEL: test30 -; ALL-NOT: gather +; KNL_64-LABEL: test30: +; KNL_64: # BB#0: +; KNL_64-NEXT: kmovw %edx, %k0 +; KNL_64-NEXT: kmovw %esi, %k2 +; KNL_64-NEXT: vpmovsxdq %xmm1, %ymm1 +; KNL_64-NEXT: vpsllq $2, %ymm1, %ymm1 +; KNL_64-NEXT: vpaddq %ymm1, %ymm0, %ymm1 +; KNL_64-NEXT: testb $1, %dil +; KNL_64-NEXT: # implicit-def: %XMM0 +; KNL_64-NEXT: je .LBB30_2 +; KNL_64-NEXT: # BB#1: # %cond.load +; KNL_64-NEXT: vmovq %xmm1, %rax +; KNL_64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; KNL_64-NEXT: .LBB30_2: # %else +; KNL_64-NEXT: kmovw %edi, %k1 +; KNL_64-NEXT: kshiftlw $15, %k2, %k2 +; KNL_64-NEXT: kshiftrw $15, %k2, %k2 +; KNL_64-NEXT: kmovw %k2, %eax +; KNL_64-NEXT: testb $1, %al +; KNL_64-NEXT: je .LBB30_4 +; KNL_64-NEXT: # BB#3: # %cond.load1 +; KNL_64-NEXT: vpextrq $1, %xmm1, %rax +; KNL_64-NEXT: vpinsrd $1, (%rax), %xmm0, %xmm0 +; KNL_64-NEXT: .LBB30_4: # %else2 +; KNL_64-NEXT: kshiftlw $15, %k0, %k0 +; KNL_64-NEXT: kshiftrw $15, %k0, %k0 +; KNL_64-NEXT: kmovw %k0, %eax +; KNL_64-NEXT: testb $1, %al +; KNL_64-NEXT: je .LBB30_6 +; KNL_64-NEXT: # BB#5: # %cond.load4 +; KNL_64-NEXT: vextracti128 $1, %ymm1, %xmm1 +; KNL_64-NEXT: vmovq %xmm1, %rax +; KNL_64-NEXT: vpinsrd $2, (%rax), %xmm0, %xmm0 +; KNL_64-NEXT: .LBB30_6: # %else5 +; KNL_64-NEXT: kmovw %k2, %eax +; KNL_64-NEXT: kshiftlw $15, %k1, %k1 +; KNL_64-NEXT: kshiftrw $15, %k1, %k1 +; KNL_64-NEXT: kmovw %k1, %ecx +; KNL_64-NEXT: vmovd %ecx, %xmm1 +; KNL_64-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; KNL_64-NEXT: kmovw %k0, %eax +; KNL_64-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 +; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1 +; KNL_64-NEXT: vblendvps %xmm1, %xmm0, %xmm2, %xmm0 +; KNL_64-NEXT: vzeroupper +; KNL_64-NEXT: retq +; +; KNL_32-LABEL: test30: +; KNL_32: # BB#0: +; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax +; KNL_32-NEXT: kmovw %eax, %k0 +; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax +; KNL_32-NEXT: kmovw %eax, %k2 +; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax +; KNL_32-NEXT: vpslld $2, %xmm1, %xmm1 +; KNL_32-NEXT: vpaddd %xmm1, %xmm0, %xmm1 +; KNL_32-NEXT: testb $1, %al +; KNL_32-NEXT: # implicit-def: %XMM0 +; KNL_32-NEXT: je .LBB30_2 +; KNL_32-NEXT: # BB#1: # %cond.load +; KNL_32-NEXT: vmovd %xmm1, %ecx +; KNL_32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; KNL_32-NEXT: .LBB30_2: # %else +; KNL_32-NEXT: kmovw %eax, %k1 +; KNL_32-NEXT: kshiftlw $15, %k2, %k2 +; KNL_32-NEXT: kshiftrw $15, %k2, %k2 +; KNL_32-NEXT: kmovw %k2, %eax +; KNL_32-NEXT: testb $1, %al +; KNL_32-NEXT: je .LBB30_4 +; KNL_32-NEXT: # BB#3: # %cond.load1 +; KNL_32-NEXT: vpextrd $1, %xmm1, %eax +; KNL_32-NEXT: vpinsrd $1, (%eax), %xmm0, %xmm0 +; KNL_32-NEXT: .LBB30_4: # %else2 +; KNL_32-NEXT: kshiftlw $15, %k0, %k0 +; KNL_32-NEXT: kshiftrw $15, %k0, %k0 +; KNL_32-NEXT: kmovw %k0, %eax +; KNL_32-NEXT: testb $1, %al +; KNL_32-NEXT: je .LBB30_6 +; KNL_32-NEXT: # BB#5: # %cond.load4 +; KNL_32-NEXT: vpextrd $2, %xmm1, %eax +; KNL_32-NEXT: vpinsrd $2, (%eax), %xmm0, %xmm0 +; KNL_32-NEXT: .LBB30_6: # %else5 +; KNL_32-NEXT: kmovw %k2, %eax +; KNL_32-NEXT: kshiftlw $15, %k1, %k1 +; KNL_32-NEXT: kshiftrw $15, %k1, %k1 +; KNL_32-NEXT: kmovw %k1, %ecx +; KNL_32-NEXT: vmovd %ecx, %xmm1 +; KNL_32-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 +; KNL_32-NEXT: kmovw %k0, %eax +; KNL_32-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 +; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1 +; KNL_32-NEXT: vblendvps %xmm1, %xmm0, 
%xmm2, %xmm0 +; KNL_32-NEXT: retl +; +; SKX-LABEL: test30: +; SKX: # BB#0: +; SKX-NEXT: vpslld $31, %xmm2, %xmm2 +; SKX-NEXT: vptestmd %xmm2, %xmm2, %k1 +; SKX-NEXT: kshiftlw $15, %k1, %k0 +; SKX-NEXT: kshiftrw $15, %k0, %k0 +; SKX-NEXT: kmovw %k0, %eax +; SKX-NEXT: vpmovsxdq %xmm1, %ymm1 +; SKX-NEXT: vpsllq $2, %ymm1, %ymm1 +; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm1 +; SKX-NEXT: testb $1, %al +; SKX-NEXT: # implicit-def: %XMM0 +; SKX-NEXT: je .LBB30_2 +; SKX-NEXT: # BB#1: # %cond.load +; SKX-NEXT: vmovq %xmm1, %rax +; SKX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SKX-NEXT: .LBB30_2: # %else +; SKX-NEXT: kshiftlw $14, %k1, %k0 +; SKX-NEXT: kshiftrw $15, %k0, %k0 +; SKX-NEXT: kmovw %k0, %eax +; SKX-NEXT: testb $1, %al +; SKX-NEXT: je .LBB30_4 +; SKX-NEXT: # BB#3: # %cond.load1 +; SKX-NEXT: vpextrq $1, %xmm1, %rax +; SKX-NEXT: vpinsrd $1, (%rax), %xmm0, %xmm0 +; SKX-NEXT: .LBB30_4: # %else2 +; SKX-NEXT: kshiftlw $13, %k1, %k0 +; SKX-NEXT: kshiftrw $15, %k0, %k0 +; SKX-NEXT: kmovw %k0, %eax +; SKX-NEXT: testb $1, %al +; SKX-NEXT: je .LBB30_6 +; SKX-NEXT: # BB#5: # %cond.load4 +; SKX-NEXT: vextracti128 $1, %ymm1, %xmm1 +; SKX-NEXT: vmovq %xmm1, %rax +; SKX-NEXT: vpinsrd $2, (%rax), %xmm0, %xmm0 +; SKX-NEXT: .LBB30_6: # %else5 +; SKX-NEXT: vmovdqa32 %xmm0, %xmm3 {%k1} +; SKX-NEXT: vmovdqa %xmm3, %xmm0 +; SKX-NEXT: vzeroupper +; SKX-NEXT: retq +; +; SKX_32-LABEL: test30: +; SKX_32: # BB#0: +; SKX_32-NEXT: subl $12, %esp +; SKX_32-NEXT: .Lcfi0: +; SKX_32-NEXT: .cfi_def_cfa_offset 16 +; SKX_32-NEXT: vpslld $31, %xmm2, %xmm2 +; SKX_32-NEXT: vptestmd %xmm2, %xmm2, %k1 +; SKX_32-NEXT: kshiftlw $15, %k1, %k0 +; SKX_32-NEXT: kshiftrw $15, %k0, %k0 +; SKX_32-NEXT: kmovw %k0, %eax +; SKX_32-NEXT: vpslld $2, %xmm1, %xmm1 +; SKX_32-NEXT: vpaddd %xmm1, %xmm0, %xmm2 +; SKX_32-NEXT: testb $1, %al +; SKX_32-NEXT: # implicit-def: %XMM1 +; SKX_32-NEXT: je .LBB30_2 +; SKX_32-NEXT: # BB#1: # %cond.load +; SKX_32-NEXT: vmovd %xmm2, %eax +; SKX_32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SKX_32-NEXT: .LBB30_2: # %else +; SKX_32-NEXT: kshiftlw $14, %k1, %k0 +; SKX_32-NEXT: kshiftrw $15, %k0, %k0 +; SKX_32-NEXT: kmovw %k0, %eax +; SKX_32-NEXT: testb $1, %al +; SKX_32-NEXT: je .LBB30_4 +; SKX_32-NEXT: # BB#3: # %cond.load1 +; SKX_32-NEXT: vpextrd $1, %xmm2, %eax +; SKX_32-NEXT: vpinsrd $1, (%eax), %xmm1, %xmm1 +; SKX_32-NEXT: .LBB30_4: # %else2 +; SKX_32-NEXT: vmovdqa {{[0-9]+}}(%esp), %xmm0 +; SKX_32-NEXT: kshiftlw $13, %k1, %k0 +; SKX_32-NEXT: kshiftrw $15, %k0, %k0 +; SKX_32-NEXT: kmovw %k0, %eax +; SKX_32-NEXT: testb $1, %al +; SKX_32-NEXT: je .LBB30_6 +; SKX_32-NEXT: # BB#5: # %cond.load4 +; SKX_32-NEXT: vpextrd $2, %xmm2, %eax +; SKX_32-NEXT: vpinsrd $2, (%eax), %xmm1, %xmm1 +; SKX_32-NEXT: .LBB30_6: # %else5 +; SKX_32-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1} +; SKX_32-NEXT: addl $12, %esp +; SKX_32-NEXT: retl %sext_ind = sext <3 x i32> %ind to <3 x i64> %gep.random = getelementptr i32, <3 x i32*> %base, <3 x i64> %sext_ind Index: test/CodeGen/X86/vector-idiv-sdiv-128.ll =================================================================== --- test/CodeGen/X86/vector-idiv-sdiv-128.ll +++ test/CodeGen/X86/vector-idiv-sdiv-128.ll @@ -405,14 +405,10 @@ ; SSE2-NEXT: psrld $31, %xmm2 ; SSE2-NEXT: psrad $2, %xmm1 ; SSE2-NEXT: paddd %xmm2, %xmm1 -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [7,7,7,7] -; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] -; SSE2-NEXT: pmuludq %xmm2, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; SSE2-NEXT: pmuludq %xmm2, %xmm3 -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = 
xmm3[0,2,2,3] -; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; SSE2-NEXT: psubd %xmm1, %xmm0 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: pslld $3, %xmm2 +; SSE2-NEXT: psubd %xmm1, %xmm2 +; SSE2-NEXT: psubd %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: test_rem7_4i32: @@ -429,8 +425,10 @@ ; SSE41-NEXT: psrld $31, %xmm2 ; SSE41-NEXT: psrad $2, %xmm1 ; SSE41-NEXT: paddd %xmm2, %xmm1 -; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm1 -; SSE41-NEXT: psubd %xmm1, %xmm0 +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: pslld $3, %xmm2 +; SSE41-NEXT: psubd %xmm1, %xmm2 +; SSE41-NEXT: psubd %xmm2, %xmm0 ; SSE41-NEXT: retq ; ; AVX1-LABEL: test_rem7_4i32: @@ -446,7 +444,8 @@ ; AVX1-NEXT: vpsrld $31, %xmm1, %xmm2 ; AVX1-NEXT: vpsrad $2, %xmm1, %xmm1 ; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpslld $3, %xmm1, %xmm2 +; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; @@ -463,8 +462,8 @@ ; AVX2-NEXT: vpsrld $31, %xmm1, %xmm2 ; AVX2-NEXT: vpsrad $2, %xmm1, %xmm1 ; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [7,7,7,7] -; AVX2-NEXT: vpmulld %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpslld $3, %xmm1, %xmm2 +; AVX2-NEXT: vpsubd %xmm1, %xmm2, %xmm1 ; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq %res = srem <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7> @@ -480,8 +479,10 @@ ; SSE-NEXT: psrlw $15, %xmm2 ; SSE-NEXT: psraw $1, %xmm1 ; SSE-NEXT: paddw %xmm2, %xmm1 -; SSE-NEXT: pmullw {{.*}}(%rip), %xmm1 -; SSE-NEXT: psubw %xmm1, %xmm0 +; SSE-NEXT: movdqa %xmm1, %xmm2 +; SSE-NEXT: psllw $3, %xmm2 +; SSE-NEXT: psubw %xmm1, %xmm2 +; SSE-NEXT: psubw %xmm2, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: test_rem7_8i16: @@ -490,7 +491,8 @@ ; AVX-NEXT: vpsrlw $15, %xmm1, %xmm2 ; AVX-NEXT: vpsraw $1, %xmm1, %xmm1 ; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1 -; AVX-NEXT: vpmullw {{.*}}(%rip), %xmm1, %xmm1 +; AVX-NEXT: vpsllw $3, %xmm1, %xmm2 +; AVX-NEXT: vpsubw %xmm1, %xmm2, %xmm1 ; AVX-NEXT: vpsubw %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq %res = srem <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> @@ -500,40 +502,32 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; SSE2-LABEL: test_rem7_16i8: ; SSE2: # BB#0: -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; SSE2-NEXT: psraw $8, %xmm2 -; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427] -; SSE2-NEXT: pmullw %xmm3, %xmm2 -; SSE2-NEXT: psrlw $8, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE2-NEXT: psraw $8, %xmm1 -; SSE2-NEXT: pmullw %xmm3, %xmm1 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65427,65427,65427,65427,65427,65427,65427,65427] +; SSE2-NEXT: pmullw %xmm2, %xmm1 ; SSE2-NEXT: psrlw $8, %xmm1 -; SSE2-NEXT: packuswb %xmm2, %xmm1 -; SSE2-NEXT: paddb %xmm0, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: psrlw $2, %xmm2 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm2 -; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] -; SSE2-NEXT: pxor %xmm3, %xmm2 -; SSE2-NEXT: psubb %xmm3, %xmm2 -; SSE2-NEXT: psrlw $7, %xmm1 +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: psraw $8, %xmm3 +; SSE2-NEXT: pmullw %xmm2, %xmm3 +; SSE2-NEXT: psrlw $8, %xmm3 +; SSE2-NEXT: packuswb %xmm1, %xmm3 +; SSE2-NEXT: paddb %xmm0,
%xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm1 +; SSE2-NEXT: psrlw $2, %xmm1 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 -; SSE2-NEXT: paddb %xmm2, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; SSE2-NEXT: psraw $8, %xmm2 -; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7] -; SSE2-NEXT: pmullw %xmm3, %xmm2 -; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] -; SSE2-NEXT: pand %xmm4, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: psraw $8, %xmm1 -; SSE2-NEXT: pmullw %xmm3, %xmm1 -; SSE2-NEXT: pand %xmm4, %xmm1 -; SSE2-NEXT: packuswb %xmm2, %xmm1 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] +; SSE2-NEXT: pxor %xmm2, %xmm1 +; SSE2-NEXT: psubb %xmm2, %xmm1 +; SSE2-NEXT: psrlw $7, %xmm3 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm3 +; SSE2-NEXT: paddb %xmm1, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm1 +; SSE2-NEXT: psllw $3, %xmm1 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: psubb %xmm3, %xmm1 ; SSE2-NEXT: psubb %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -558,16 +552,10 @@ ; SSE41-NEXT: psrlw $7, %xmm1 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm1 ; SSE41-NEXT: paddb %xmm2, %xmm1 -; SSE41-NEXT: pmovsxbw %xmm1, %xmm2 -; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7] -; SSE41-NEXT: pmullw %xmm3, %xmm2 -; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] -; SSE41-NEXT: pand %xmm4, %xmm2 -; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSE41-NEXT: pmovsxbw %xmm1, %xmm1 -; SSE41-NEXT: pmullw %xmm3, %xmm1 -; SSE41-NEXT: pand %xmm4, %xmm1 -; SSE41-NEXT: packuswb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: psllw $3, %xmm2 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm2 +; SSE41-NEXT: psubb %xmm1, %xmm2 ; SSE41-NEXT: psubb %xmm2, %xmm0 ; SSE41-NEXT: retq ; @@ -591,16 +579,9 @@ ; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm1 ; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpmovsxbw %xmm1, %xmm2 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7] -; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] -; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1 -; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1 -; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm2 +; AVX1-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; @@ -620,13 +601,9 @@ ; AVX2NOBW-NEXT: vpsrlw $7, %xmm1, %xmm1 ; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 ; AVX2NOBW-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX2NOBW-NEXT: vpmovsxbw %xmm1, %ymm1 -; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1 -; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX2NOBW-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> -; AVX2NOBW-NEXT: vpshufb %xmm3, %xmm2, %xmm2 -; AVX2NOBW-NEXT: vpshufb %xmm3, %xmm1, %xmm1 -; AVX2NOBW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX2NOBW-NEXT: vpsllw $3, %xmm1, %xmm2 +; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX2NOBW-NEXT: vpsubb %xmm1, %xmm2, %xmm1 ; AVX2NOBW-NEXT: vpsubb %xmm1, %xmm0, %xmm0 ; AVX2NOBW-NEXT: vzeroupper ; AVX2NOBW-NEXT: retq @@ -646,9 +623,9 @@ ; AVX512BW-NEXT: vpsrlw $7, %xmm1, %xmm1 ; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 ; 
AVX512BW-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX512BW-NEXT: vpmovsxbw %xmm1, %ymm1 -; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1 -; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1 +; AVX512BW-NEXT: vpsllw $3, %xmm1, %xmm2 +; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX512BW-NEXT: vpsubb %xmm1, %xmm2, %xmm1 ; AVX512BW-NEXT: vpsubb %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq Index: test/CodeGen/X86/vector-idiv-sdiv-256.ll =================================================================== --- test/CodeGen/X86/vector-idiv-sdiv-256.ll +++ test/CodeGen/X86/vector-idiv-sdiv-256.ll @@ -376,20 +376,21 @@ ; AVX1-NEXT: vpsrld $31, %xmm2, %xmm3 ; AVX1-NEXT: vpsrad $2, %xmm2, %xmm2 ; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [7,7,7,7] -; AVX1-NEXT: vpmulld %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpslld $3, %xmm2, %xmm3 +; AVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpsubd %xmm2, %xmm4, %xmm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[1,1,3,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] -; AVX1-NEXT: vpmuldq %xmm4, %xmm5, %xmm4 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] +; AVX1-NEXT: vpmuldq %xmm3, %xmm4, %xmm3 ; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm1 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7] ; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm1 -; AVX1-NEXT: vpsrld $31, %xmm1, %xmm4 +; AVX1-NEXT: vpsrld $31, %xmm1, %xmm3 ; AVX1-NEXT: vpsrad $2, %xmm1, %xmm1 -; AVX1-NEXT: vpaddd %xmm4, %xmm1, %xmm1 -; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpslld $3, %xmm1, %xmm3 +; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1 ; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -407,8 +408,8 @@ ; AVX2-NEXT: vpsrld $31, %ymm1, %ymm2 ; AVX2-NEXT: vpsrad $2, %ymm1, %ymm1 ; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm1 -; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [7,7,7,7,7,7,7,7] -; AVX2-NEXT: vpmulld %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vpslld $3, %ymm1, %ymm2 +; AVX2-NEXT: vpsubd %ymm1, %ymm2, %ymm1 ; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq %res = srem <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> @@ -424,14 +425,15 @@ ; AVX1-NEXT: vpsrlw $15, %xmm3, %xmm4 ; AVX1-NEXT: vpsraw $1, %xmm3, %xmm3 ; AVX1-NEXT: vpaddw %xmm4, %xmm3, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7] -; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpsllw $3, %xmm3, %xmm4 +; AVX1-NEXT: vpsubw %xmm3, %xmm4, %xmm3 ; AVX1-NEXT: vpsubw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpmulhw %xmm2, %xmm0, %xmm2 ; AVX1-NEXT: vpsrlw $15, %xmm2, %xmm3 ; AVX1-NEXT: vpsraw $1, %xmm2, %xmm2 ; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vpmullw %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm2, %xmm3 +; AVX1-NEXT: vpsubw %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpsubw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -442,7 +444,8 @@ ; AVX2-NEXT: vpsrlw $15, %ymm1, %ymm2 ; AVX2-NEXT: vpsraw $1, %ymm1, %ymm1 ; AVX2-NEXT: vpaddw %ymm2, %ymm1, %ymm1 -; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1 +; AVX2-NEXT: vpsllw $3, %ymm1, %ymm2 +; AVX2-NEXT: vpsubw %ymm1, %ymm2, %ymm1 ; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq %res = srem <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> @@ -452,64 +455,53 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX1-LABEL:
test_rem7_32i8: ; AVX1: # BB#0: -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 -; AVX1-NEXT: vpmovsxbw %xmm2, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [65427,65427,65427,65427,65427,65427,65427,65427] -; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3 -; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,3,0,1] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpmovsxbw %xmm1, %xmm2 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427] +; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] ; AVX1-NEXT: vpmovsxbw %xmm4, %xmm4 -; AVX1-NEXT: vpmullw %xmm1, %xmm4, %xmm4 +; AVX1-NEXT: vpmullw %xmm3, %xmm4, %xmm4 ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 -; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3 -; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm3 -; AVX1-NEXT: vpsrlw $7, %xmm3, %xmm4 +; AVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm2 +; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm4 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] ; AVX1-NEXT: vpand %xmm8, %xmm4, %xmm4 -; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] -; AVX1-NEXT: vpand %xmm9, %xmm3, %xmm3 +; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] -; AVX1-NEXT: vpxor %xmm7, %xmm3, %xmm3 -; AVX1-NEXT: vpsubb %xmm7, %xmm3, %xmm3 -; AVX1-NEXT: vpaddb %xmm4, %xmm3, %xmm3 -; AVX1-NEXT: vpmovsxbw %xmm3, %xmm4 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [7,7,7,7,7,7,7,7] -; AVX1-NEXT: vpmullw %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [255,255,255,255,255,255,255,255] -; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4 -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] -; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3 -; AVX1-NEXT: vpmullw %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 -; AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm3 -; AVX1-NEXT: vpsubb %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vpmovsxbw %xmm0, %xmm3 -; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3 -; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 +; AVX1-NEXT: vpxor %xmm7, %xmm2, %xmm2 +; AVX1-NEXT: vpsubb %xmm7, %xmm2, %xmm2 +; AVX1-NEXT: vpaddb %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm2, %xmm4 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248] +; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vpsubb %xmm2, %xmm4, %xmm2 +; AVX1-NEXT: vpsubb %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpmovsxbw %xmm0, %xmm2 +; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,0,1] ; AVX1-NEXT: vpmovsxbw %xmm4, %xmm4 -; AVX1-NEXT: vpmullw %xmm1, %xmm4, %xmm1 -; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1 -; AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1 -; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm1 -; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm3 +; AVX1-NEXT: vpmullw %xmm3, %xmm4, %xmm3 +; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 +; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpaddb %xmm0, %xmm2, %xmm2 +; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm3 ; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3 -; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm1 -; AVX1-NEXT: vpand %xmm9, %xmm1, %xmm1 -; AVX1-NEXT: vpxor %xmm7, %xmm1, %xmm1 -; AVX1-NEXT: vpsubb %xmm7, %xmm1, %xmm1 -; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1 -; 
AVX1-NEXT: vpmovsxbw %xmm1, %xmm3 -; AVX1-NEXT: vpmullw %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1 -; AVX1-NEXT: vpmullw %xmm5, %xmm1, %xmm1 -; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1 -; AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1 -; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2 +; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2 +; AVX1-NEXT: vpxor %xmm7, %xmm2, %xmm2 +; AVX1-NEXT: vpsubb %xmm7, %xmm2, %xmm2 +; AVX1-NEXT: vpaddb %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm2, %xmm3 +; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vpsubb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpsubb %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq ; ; AVX2NOBW-LABEL: test_rem7_32i8: @@ -534,22 +526,9 @@ ; AVX2NOBW-NEXT: vpsrlw $7, %ymm1, %ymm1 ; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 ; AVX2NOBW-NEXT: vpaddb %ymm1, %ymm2, %ymm1 -; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX2NOBW-NEXT: vpmovsxbw %xmm2, %ymm2 -; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] -; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm2, %ymm2 -; AVX2NOBW-NEXT: vextracti128 $1, %ymm2, %xmm4 -; AVX2NOBW-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> -; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm4, %xmm4 -; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm2, %xmm2 -; AVX2NOBW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0] -; AVX2NOBW-NEXT: vpmovsxbw %xmm1, %ymm1 -; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm1, %ymm1 -; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm3 -; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm3, %xmm3 -; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm1, %xmm1 -; AVX2NOBW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] -; AVX2NOBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX2NOBW-NEXT: vpsllw $3, %ymm1, %ymm2 +; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX2NOBW-NEXT: vpsubb %ymm1, %ymm2, %ymm1 ; AVX2NOBW-NEXT: vpsubb %ymm1, %ymm0, %ymm0 ; AVX2NOBW-NEXT: retq ; @@ -568,9 +547,9 @@ ; AVX512BW-NEXT: vpsrlw $7, %ymm1, %ymm1 ; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 ; AVX512BW-NEXT: vpaddb %ymm1, %ymm2, %ymm1 -; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1 -; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1 -; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1 +; AVX512BW-NEXT: vpsllw $3, %ymm1, %ymm2 +; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX512BW-NEXT: vpsubb %ymm1, %ymm2, %ymm1 ; AVX512BW-NEXT: vpsubb %ymm1, %ymm0, %ymm0 ; AVX512BW-NEXT: retq %res = srem <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7> Index: test/CodeGen/X86/vector-idiv-sdiv-512.ll =================================================================== --- test/CodeGen/X86/vector-idiv-sdiv-512.ll +++ test/CodeGen/X86/vector-idiv-sdiv-512.ll @@ -322,7 +322,8 @@ ; AVX-NEXT: vpsrld $31, %zmm1, %zmm2 ; AVX-NEXT: vpsrad $2, %zmm1, %zmm1 ; AVX-NEXT: vpaddd %zmm2, %zmm1, %zmm1 -; AVX-NEXT: vpmulld {{.*}}(%rip){1to16}, %zmm1, %zmm1 +; AVX-NEXT: vpslld $3, %zmm1, %zmm2 +; AVX-NEXT: vpsubd %zmm1, %zmm2, %zmm1 ; AVX-NEXT: vpsubd %zmm1, %zmm0, %zmm0 ; AVX-NEXT: retq %res = srem <16 x i32> %a, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> @@ -337,14 +338,15 @@ ; AVX512F-NEXT: vpsrlw $15, %ymm3, %ymm4 ; AVX512F-NEXT: vpsraw $1, %ymm3, %ymm3 ; AVX512F-NEXT: vpaddw %ymm4, %ymm3, %ymm3 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] -; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3 +; AVX512F-NEXT: vpsllw $3, %ymm3, %ymm4 +; AVX512F-NEXT: vpsubw %ymm3, %ymm4, %ymm3 ; AVX512F-NEXT: vpsubw %ymm3, %ymm0, %ymm0 ;
AVX512F-NEXT: vpmulhw %ymm2, %ymm1, %ymm2 ; AVX512F-NEXT: vpsrlw $15, %ymm2, %ymm3 ; AVX512F-NEXT: vpsraw $1, %ymm2, %ymm2 ; AVX512F-NEXT: vpaddw %ymm3, %ymm2, %ymm2 -; AVX512F-NEXT: vpmullw %ymm4, %ymm2, %ymm2 +; AVX512F-NEXT: vpsllw $3, %ymm2, %ymm3 +; AVX512F-NEXT: vpsubw %ymm2, %ymm3, %ymm2 ; AVX512F-NEXT: vpsubw %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: retq ; @@ -354,7 +356,8 @@ ; AVX512BW-NEXT: vpsrlw $15, %zmm1, %zmm2 ; AVX512BW-NEXT: vpsraw $1, %zmm1, %zmm1 ; AVX512BW-NEXT: vpaddw %zmm2, %zmm1, %zmm1 -; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1 +; AVX512BW-NEXT: vpsllw $3, %zmm1, %zmm2 +; AVX512BW-NEXT: vpsubw %zmm1, %zmm2, %zmm1 ; AVX512BW-NEXT: vpsubw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: retq %res = srem <32 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> @@ -376,56 +379,42 @@ ; AVX512F-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 ; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3 ; AVX512F-NEXT: vpaddb %ymm0, %ymm3, %ymm3 -; AVX512F-NEXT: vpsrlw $7, %ymm3, %ymm5 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; AVX512F-NEXT: vpand %ymm4, %ymm5, %ymm7 +; AVX512F-NEXT: vpsrlw $7, %ymm3, %ymm4 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4 ; AVX512F-NEXT: vpsrlw $2, %ymm3, %ymm3 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] -; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm3 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] -; AVX512F-NEXT: vpxor %ymm6, %ymm3, %ymm3 -; AVX512F-NEXT: vpsubb %ymm6, %ymm3, %ymm3 -; AVX512F-NEXT: vpaddb %ymm7, %ymm3, %ymm7 -; AVX512F-NEXT: vpmovsxbw %xmm7, %ymm8 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] -; AVX512F-NEXT: vpmullw %ymm3, %ymm8, %ymm8 -; AVX512F-NEXT: vpmovsxwd %ymm8, %zmm8 -; AVX512F-NEXT: vpmovdb %zmm8, %xmm8 -; AVX512F-NEXT: vextracti128 $1, %ymm7, %xmm7 -; AVX512F-NEXT: vpmovsxbw %xmm7, %ymm7 -; AVX512F-NEXT: vpmullw %ymm3, %ymm7, %ymm7 -; AVX512F-NEXT: vpmovsxwd %ymm7, %zmm7 -; AVX512F-NEXT: vpmovdb %zmm7, %xmm7 -; AVX512F-NEXT: vinserti128 $1, %xmm7, %ymm8, %ymm7 -; AVX512F-NEXT: vpsubb %ymm7, %ymm0, %ymm0 -; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm7 -; AVX512F-NEXT: vpmovsxbw %xmm7, %ymm7 -; AVX512F-NEXT: vpmullw %ymm2, %ymm7, %ymm7 -; AVX512F-NEXT: vpsrlw $8, %ymm7, %ymm7 -; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm8 -; AVX512F-NEXT: vpmullw %ymm2, %ymm8, %ymm2 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] +; AVX512F-NEXT: vpxor %ymm7, %ymm3, %ymm3 +; AVX512F-NEXT: vpsubb %ymm7, %ymm3, %ymm3 +; AVX512F-NEXT: vpaddb %ymm4, %ymm3, %ymm3 +; AVX512F-NEXT: vpsllw $3, %ymm3, %ymm4 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248] +; AVX512F-NEXT: vpand %ymm8, %ymm4, %ymm4 +; AVX512F-NEXT: vpsubb %ymm3, %ymm4, %ymm3 +; AVX512F-NEXT: vpsubb %ymm3, %ymm0, %ymm0 +; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm3 +; AVX512F-NEXT: vpmovsxbw %xmm3, %ymm3 +; AVX512F-NEXT: vpmullw %ymm2, %ymm3, %ymm3 +; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3 +;
AVX512F-NEXT: vpmovsxbw %xmm1, %ymm4 +; AVX512F-NEXT: vpmullw %ymm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2 -; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm8 = ymm2[2,3],ymm7[2,3] -; AVX512F-NEXT: vinserti128 $1, %xmm7, %ymm2, %ymm2 -; AVX512F-NEXT: vpackuswb %ymm8, %ymm2, %ymm2 +; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm2[2,3],ymm3[2,3] +; AVX512F-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 +; AVX512F-NEXT: vpackuswb %ymm4, %ymm2, %ymm2 ; AVX512F-NEXT: vpaddb %ymm1, %ymm2, %ymm2 -; AVX512F-NEXT: vpsrlw $7, %ymm2, %ymm7 -; AVX512F-NEXT: vpand %ymm4, %ymm7, %ymm4 +; AVX512F-NEXT: vpsrlw $7, %ymm2, %ymm3 +; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm3 ; AVX512F-NEXT: vpsrlw $2, %ymm2, %ymm2 -; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2 -; AVX512F-NEXT: vpxor %ymm6, %ymm2, %ymm2 -; AVX512F-NEXT: vpsubb %ymm6, %ymm2, %ymm2 -; AVX512F-NEXT: vpaddb %ymm4, %ymm2, %ymm2 -; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm4 -; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm4 -; AVX512F-NEXT: vpmovsxwd %ymm4, %zmm4 -; AVX512F-NEXT: vpmovdb %zmm4, %xmm4 -; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm2 -; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm2 -; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2 -; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2 -; AVX512F-NEXT: vpmovdb %zmm2, %xmm2 -; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2 +; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2 +; AVX512F-NEXT: vpxor %ymm7, %ymm2, %ymm2 +; AVX512F-NEXT: vpsubb %ymm7, %ymm2, %ymm2 +; AVX512F-NEXT: vpaddb %ymm3, %ymm2, %ymm2 +; AVX512F-NEXT: vpsllw $3, %ymm2, %ymm3 +; AVX512F-NEXT: vpand %ymm8, %ymm3, %ymm3 +; AVX512F-NEXT: vpsubb %ymm2, %ymm3, %ymm2 ; AVX512F-NEXT: vpsubb %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: retq ; Index: test/CodeGen/X86/vector-idiv-udiv-128.ll =================================================================== --- test/CodeGen/X86/vector-idiv-udiv-128.ll +++ test/CodeGen/X86/vector-idiv-udiv-128.ll @@ -381,14 +381,10 @@ ; SSE2-NEXT: psrld $1, %xmm1 ; SSE2-NEXT: paddd %xmm2, %xmm1 ; SSE2-NEXT: psrld $2, %xmm1 -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [7,7,7,7] -; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] -; SSE2-NEXT: pmuludq %xmm2, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; SSE2-NEXT: pmuludq %xmm2, %xmm3 -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3] -; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; SSE2-NEXT: psubd %xmm1, %xmm0 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: pslld $3, %xmm2 +; SSE2-NEXT: psubd %xmm1, %xmm2 +; SSE2-NEXT: psubd %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: test_rem7_4i32: @@ -405,8 +401,10 @@ ; SSE41-NEXT: psrld $1, %xmm2 ; SSE41-NEXT: paddd %xmm1, %xmm2 ; SSE41-NEXT: psrld $2, %xmm2 -; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm2 -; SSE41-NEXT: psubd %xmm2, %xmm0 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: pslld $3, %xmm1 +; SSE41-NEXT: psubd %xmm2, %xmm1 +; SSE41-NEXT: psubd %xmm1, %xmm0 ; SSE41-NEXT: retq ; ; AVX1-LABEL: test_rem7_4i32: @@ -422,7 +420,8 @@ ; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2 ; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpsrld $2, %xmm1, %xmm1 -; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpslld $3, %xmm1, %xmm2 +; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; @@ -439,8 +438,8 @@ ; AVX2-NEXT: vpsrld $1, %xmm2, %xmm2 ; AVX2-NEXT: vpaddd %xmm1, %xmm2, %xmm1 ; AVX2-NEXT: vpsrld $2, %xmm1, %xmm1 -; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [7,7,7,7] -; AVX2-NEXT: vpmulld %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpslld $3, %xmm1, %xmm2 +; AVX2-NEXT: vpsubd %xmm1, %xmm2, %xmm1 ; 
AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq %res = urem <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7> @@ -457,8 +456,10 @@ ; SSE-NEXT: psrlw $1, %xmm2 ; SSE-NEXT: paddw %xmm1, %xmm2 ; SSE-NEXT: psrlw $2, %xmm2 -; SSE-NEXT: pmullw {{.*}}(%rip), %xmm2 -; SSE-NEXT: psubw %xmm2, %xmm0 +; SSE-NEXT: movdqa %xmm2, %xmm1 +; SSE-NEXT: psllw $3, %xmm1 +; SSE-NEXT: psubw %xmm2, %xmm1 +; SSE-NEXT: psubw %xmm1, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: test_rem7_8i16: @@ -468,7 +469,8 @@ ; AVX-NEXT: vpsrlw $1, %xmm2, %xmm2 ; AVX-NEXT: vpaddw %xmm1, %xmm2, %xmm1 ; AVX-NEXT: vpsrlw $2, %xmm1, %xmm1 -; AVX-NEXT: vpmullw {{.*}}(%rip), %xmm1, %xmm1 +; AVX-NEXT: vpsllw $3, %xmm1, %xmm2 +; AVX-NEXT: vpsubw %xmm1, %xmm2, %xmm1 ; AVX-NEXT: vpsubw %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq %res = urem <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> @@ -497,18 +499,10 @@ ; SSE2-NEXT: psrlw $2, %xmm1 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; SSE2-NEXT: psraw $8, %xmm2 -; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7] -; SSE2-NEXT: pmullw %xmm3, %xmm2 -; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] -; SSE2-NEXT: pand %xmm4, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: psraw $8, %xmm1 -; SSE2-NEXT: pmullw %xmm3, %xmm1 -; SSE2-NEXT: pand %xmm4, %xmm1 -; SSE2-NEXT: packuswb %xmm2, %xmm1 -; SSE2-NEXT: psubb %xmm1, %xmm0 +; SSE2-NEXT: psllw $3, %xmm2 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm2 +; SSE2-NEXT: psubb %xmm1, %xmm2 +; SSE2-NEXT: psubb %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: test_rem7_16i8: @@ -529,16 +523,10 @@ ; SSE41-NEXT: paddb %xmm1, %xmm2 ; SSE41-NEXT: psrlw $2, %xmm2 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm2 -; SSE41-NEXT: pmovsxbw %xmm2, %xmm1 -; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7] -; SSE41-NEXT: pmullw %xmm3, %xmm1 -; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] -; SSE41-NEXT: pand %xmm4, %xmm1 -; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] -; SSE41-NEXT: pmovsxbw %xmm2, %xmm2 -; SSE41-NEXT: pmullw %xmm3, %xmm2 -; SSE41-NEXT: pand %xmm4, %xmm2 -; SSE41-NEXT: packuswb %xmm2, %xmm1 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psllw $3, %xmm1 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE41-NEXT: psubb %xmm2, %xmm1 ; SSE41-NEXT: psubb %xmm1, %xmm0 ; SSE41-NEXT: retq ; @@ -559,16 +547,9 @@ ; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm1 ; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 -; AVX1-NEXT: vpmovsxbw %xmm1, %xmm2 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7] -; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] -; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1 -; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1 -; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm2 +; AVX1-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; @@ -585,13 +566,9 @@ ; AVX2NOBW-NEXT: vpaddb %xmm1, %xmm2, %xmm1 ; AVX2NOBW-NEXT: vpsrlw $2, %xmm1, %xmm1 ; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 -; AVX2NOBW-NEXT: vpmovsxbw %xmm1, %ymm1 -; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1 -; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX2NOBW-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> -;
AVX2NOBW-NEXT: vpshufb %xmm3, %xmm2, %xmm2 -; AVX2NOBW-NEXT: vpshufb %xmm3, %xmm1, %xmm1 -; AVX2NOBW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX2NOBW-NEXT: vpsllw $3, %xmm1, %xmm2 +; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX2NOBW-NEXT: vpsubb %xmm1, %xmm2, %xmm1 ; AVX2NOBW-NEXT: vpsubb %xmm1, %xmm0, %xmm0 ; AVX2NOBW-NEXT: vzeroupper ; AVX2NOBW-NEXT: retq @@ -608,9 +585,9 @@ ; AVX512BW-NEXT: vpaddb %xmm1, %xmm2, %xmm1 ; AVX512BW-NEXT: vpsrlw $2, %xmm1, %xmm1 ; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 -; AVX512BW-NEXT: vpmovsxbw %xmm1, %ymm1 -; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1 -; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1 +; AVX512BW-NEXT: vpsllw $3, %xmm1, %xmm2 +; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX512BW-NEXT: vpsubb %xmm1, %xmm2, %xmm1 ; AVX512BW-NEXT: vpsubb %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq Index: test/CodeGen/X86/vector-idiv-udiv-256.ll =================================================================== --- test/CodeGen/X86/vector-idiv-udiv-256.ll +++ test/CodeGen/X86/vector-idiv-udiv-256.ll @@ -384,20 +384,21 @@ ; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3 ; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpsrld $2, %xmm2, %xmm2 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [7,7,7,7] -; AVX1-NEXT: vpmulld %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpslld $3, %xmm2, %xmm3 +; AVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpsubd %xmm2, %xmm4, %xmm2 -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[1,1,3,3] -; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] -; AVX1-NEXT: vpmuludq %xmm4, %xmm5, %xmm4 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] +; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm3 ; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm1 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7] -; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm4 -; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4 -; AVX1-NEXT: vpaddd %xmm1, %xmm4, %xmm1 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7] +; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm3 +; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3 +; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1 ; AVX1-NEXT: vpsrld $2, %xmm1, %xmm1 -; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpslld $3, %xmm1, %xmm3 +; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1 ; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -415,8 +416,8 @@ ; AVX2-NEXT: vpsrld $1, %ymm2, %ymm2 ; AVX2-NEXT: vpaddd %ymm1, %ymm2, %ymm1 ; AVX2-NEXT: vpsrld $2, %ymm1, %ymm1 -; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [7,7,7,7,7,7,7,7] -; AVX2-NEXT: vpmulld %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vpslld $3, %ymm1, %ymm2 +; AVX2-NEXT: vpsubd %ymm1, %ymm2, %ymm1 ; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq %res = urem <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> @@ -433,15 +434,16 @@ ; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4 ; AVX1-NEXT: vpaddw %xmm3, %xmm4, %xmm3 ; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7] -; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpsllw $3, %xmm3, %xmm4 +; AVX1-NEXT: vpsubw %xmm3, %xmm4, %xmm3 ; AVX1-NEXT: vpsubw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpmulhuw %xmm2, %xmm0, %xmm2 ; AVX1-NEXT: vpsubw %xmm2, %xmm0, %xmm3 ; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3 ; AVX1-NEXT: vpaddw %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2 -; AVX1-NEXT: vpmullw %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm2, %xmm3 +; AVX1-NEXT: vpsubw
%xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpsubw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -453,7 +455,8 @@ ; AVX2-NEXT: vpsrlw $1, %ymm2, %ymm2 ; AVX2-NEXT: vpaddw %ymm1, %ymm2, %ymm1 ; AVX2-NEXT: vpsrlw $2, %ymm1, %ymm1 -; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1 +; AVX2-NEXT: vpsllw $3, %ymm1, %ymm2 +; AVX2-NEXT: vpsubw %ymm1, %ymm2, %ymm1 ; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq %res = urem <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> @@ -463,59 +466,48 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX1-LABEL: test_rem7_32i8: ; AVX1: # BB#0: -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 -; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero -; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [37,37,37,37,37,37,37,37] -; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3 -; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 -; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,3,0,1] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [37,37,37,37,37,37,37,37] +; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero -; AVX1-NEXT: vpmullw %xmm1, %xmm4, %xmm4 +; AVX1-NEXT: vpmullw %xmm3, %xmm4, %xmm4 ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 -; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3 -; AVX1-NEXT: vpsubb %xmm3, %xmm2, %xmm4 +; AVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpsubb %xmm2, %xmm1, %xmm4 ; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] -; AVX1-NEXT: vpand %xmm8, %xmm4, %xmm4 -; AVX1-NEXT: vpaddb %xmm3, %xmm4, %xmm3 -; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm3 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] +; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vpaddb %xmm2, %xmm4, %xmm2 +; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] -; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3 -; AVX1-NEXT: vpmovsxbw %xmm3, %xmm6 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [7,7,7,7,7,7,7,7] -; AVX1-NEXT: vpmullw %xmm7, %xmm6, %xmm6 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255] -; AVX1-NEXT: vpand %xmm5, %xmm6, %xmm6 -; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] -; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3 -; AVX1-NEXT: vpmullw %xmm7, %xmm3, %xmm3 -; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vpackuswb %xmm3, %xmm6, %xmm3 -; AVX1-NEXT: vpsubb %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3 -; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm2, %xmm6 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248] +; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6 +; AVX1-NEXT: vpsubb %xmm2, %xmm6, %xmm2 +; AVX1-NEXT: vpsubb %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 =
xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[2,3,0,1] ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero -; AVX1-NEXT: vpmullw %xmm1, %xmm6, %xmm1 -; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1 -; AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1 -; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm3 +; AVX1-NEXT: vpmullw %xmm3, %xmm6, %xmm3 +; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 +; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpsubb %xmm2, %xmm0, %xmm3 ; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3 -; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3 -; AVX1-NEXT: vpaddb %xmm1, %xmm3, %xmm1 -; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm1 -; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1 -; AVX1-NEXT: vpmovsxbw %xmm1, %xmm3 -; AVX1-NEXT: vpmullw %xmm7, %xmm3, %xmm3 ; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1 -; AVX1-NEXT: vpmullw %xmm7, %xmm1, %xmm1 -; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1 -; AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1 -; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2 +; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm2, %xmm3 +; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpsubb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpsubb %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq ; ; AVX2NOBW-LABEL: test_rem7_32i8: @@ -537,22 +529,9 @@ ; AVX2NOBW-NEXT: vpaddb %ymm1, %ymm2, %ymm1 ; AVX2NOBW-NEXT: vpsrlw $2, %ymm1, %ymm1 ; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 -; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX2NOBW-NEXT: vpmovsxbw %xmm2, %ymm2 -; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] -; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm2, %ymm2 -; AVX2NOBW-NEXT: vextracti128 $1, %ymm2, %xmm4 -; AVX2NOBW-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> -; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm4, %xmm4 -; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm2, %xmm2 -; AVX2NOBW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0] -; AVX2NOBW-NEXT: vpmovsxbw %xmm1, %ymm1 -; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm1, %ymm1 -; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm3 -; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm3, %xmm3 -; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm1, %xmm1 -; AVX2NOBW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] -; AVX2NOBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX2NOBW-NEXT: vpsllw $3, %ymm1, %ymm2 +; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX2NOBW-NEXT: vpsubb %ymm1, %ymm2, %ymm1 ; AVX2NOBW-NEXT: vpsubb %ymm1, %ymm0, %ymm0 ; AVX2NOBW-NEXT: retq ; @@ -568,9 +547,9 @@ ; AVX512BW-NEXT: vpaddb %ymm1, %ymm2, %ymm1 ; AVX512BW-NEXT: vpsrlw $2, %ymm1, %ymm1 ; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 -; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1 -; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1 -; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1 +; AVX512BW-NEXT: vpsllw $3, %ymm1, %ymm2 +; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; AVX512BW-NEXT: vpsubb %ymm1, %ymm2, %ymm1 ; AVX512BW-NEXT: vpsubb %ymm1, %ymm0, %ymm0 ; AVX512BW-NEXT: retq %res = urem <32 x i8> %a, Index: test/CodeGen/X86/vector-idiv-udiv-512.ll =================================================================== --- 
test/CodeGen/X86/vector-idiv-udiv-512.ll +++ test/CodeGen/X86/vector-idiv-udiv-512.ll @@ -333,7 +333,8 @@ ; AVX-NEXT: vpsrld $1, %zmm1, %zmm1 ; AVX-NEXT: vpaddd %zmm3, %zmm1, %zmm1 ; AVX-NEXT: vpsrld $2, %zmm1, %zmm1 -; AVX-NEXT: vpmulld {{.*}}(%rip){1to16}, %zmm1, %zmm1 +; AVX-NEXT: vpslld $3, %zmm1, %zmm2 +; AVX-NEXT: vpsubd %zmm1, %zmm2, %zmm1 ; AVX-NEXT: vpsubd %zmm1, %zmm0, %zmm0 ; AVX-NEXT: retq %res = urem <16 x i32> %a, @@ -349,15 +350,16 @@ ; AVX512F-NEXT: vpsrlw $1, %ymm4, %ymm4 ; AVX512F-NEXT: vpaddw %ymm3, %ymm4, %ymm3 ; AVX512F-NEXT: vpsrlw $2, %ymm3, %ymm3 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] -; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3 +; AVX512F-NEXT: vpsllw $3, %ymm3, %ymm4 +; AVX512F-NEXT: vpsubw %ymm3, %ymm4, %ymm3 ; AVX512F-NEXT: vpsubw %ymm3, %ymm0, %ymm0 ; AVX512F-NEXT: vpmulhuw %ymm2, %ymm1, %ymm2 ; AVX512F-NEXT: vpsubw %ymm2, %ymm1, %ymm3 ; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3 ; AVX512F-NEXT: vpaddw %ymm2, %ymm3, %ymm2 ; AVX512F-NEXT: vpsrlw $2, %ymm2, %ymm2 -; AVX512F-NEXT: vpmullw %ymm4, %ymm2, %ymm2 +; AVX512F-NEXT: vpsllw $3, %ymm2, %ymm3 +; AVX512F-NEXT: vpsubw %ymm2, %ymm3, %ymm2 ; AVX512F-NEXT: vpsubw %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: retq ; @@ -368,7 +370,8 @@ ; AVX512BW-NEXT: vpsrlw $1, %zmm2, %zmm2 ; AVX512BW-NEXT: vpaddw %zmm1, %zmm2, %zmm1 ; AVX512BW-NEXT: vpsrlw $2, %zmm1, %zmm1 -; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1 +; AVX512BW-NEXT: vpsllw $3, %zmm1, %zmm2 +; AVX512BW-NEXT: vpsubw %zmm1, %zmm2, %zmm1 ; AVX512BW-NEXT: vpsubw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: retq %res = urem <32 x i16> %a, @@ -379,62 +382,48 @@ ; AVX512F-LABEL: test_rem7_64i8: ; AVX512F: # BB#0: ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37] -; AVX512F-NEXT: vpmullw %ymm2, %ymm3, %ymm3 -; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3 +; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37] +; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2 +; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero -; AVX512F-NEXT: vpmullw %ymm2, %ymm4, %ymm4 +; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm4 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4 -; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm4[2,3],ymm3[2,3] -; AVX512F-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 -; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3 -; AVX512F-NEXT: vpsubb %ymm3, %ymm0, %ymm4 -; AVX512F-NEXT: vpsrlw $1, %ymm4, %ymm5 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] -; AVX512F-NEXT: vpand %ymm4, %ymm5, %ymm5 -; AVX512F-NEXT: vpaddb %ymm3, %ymm5, %ymm3 -; AVX512F-NEXT: vpsrlw $2, %ymm3, %ymm3 -; AVX512F-NEXT: 
vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] -; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm6 -; AVX512F-NEXT: vpmovsxbw %xmm6, %ymm7 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] -; AVX512F-NEXT: vpmullw %ymm3, %ymm7, %ymm7 -; AVX512F-NEXT: vpmovsxwd %ymm7, %zmm7 -; AVX512F-NEXT: vpmovdb %zmm7, %xmm7 -; AVX512F-NEXT: vextracti128 $1, %ymm6, %xmm6 -; AVX512F-NEXT: vpmovsxbw %xmm6, %ymm6 -; AVX512F-NEXT: vpmullw %ymm3, %ymm6, %ymm6 -; AVX512F-NEXT: vpmovsxwd %ymm6, %zmm6 -; AVX512F-NEXT: vpmovdb %zmm6, %xmm6 -; AVX512F-NEXT: vinserti128 $1, %xmm6, %ymm7, %ymm6 -; AVX512F-NEXT: vpsubb %ymm6, %ymm0, %ymm0 -; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm6 -; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero,xmm6[8],zero,xmm6[9],zero,xmm6[10],zero,xmm6[11],zero,xmm6[12],zero,xmm6[13],zero,xmm6[14],zero,xmm6[15],zero -; AVX512F-NEXT: vpmullw %ymm2, %ymm6, %ymm6 -; AVX512F-NEXT: vpsrlw $8, %ymm6, %ymm6 -; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm7 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero -; AVX512F-NEXT: vpmullw %ymm2, %ymm7, %ymm2 -; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2 -; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm2[2,3],ymm6[2,3] -; AVX512F-NEXT: vinserti128 $1, %xmm6, %ymm2, %ymm2 -; AVX512F-NEXT: vpackuswb %ymm7, %ymm2, %ymm2 -; AVX512F-NEXT: vpsubb %ymm2, %ymm1, %ymm6 -; AVX512F-NEXT: vpsrlw $1, %ymm6, %ymm6 -; AVX512F-NEXT: vpand %ymm4, %ymm6, %ymm4 +; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm4[2,3],ymm2[2,3] +; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2 +; AVX512F-NEXT: vpackuswb %ymm5, %ymm2, %ymm2 +; AVX512F-NEXT: vpsubb %ymm2, %ymm0, %ymm4 +; AVX512F-NEXT: vpsrlw $1, %ymm4, %ymm4 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] +; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4 ; AVX512F-NEXT: vpaddb %ymm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpsrlw $2, %ymm2, %ymm2 -; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2 -; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm4 -; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm4 -; AVX512F-NEXT: vpmovsxwd %ymm4, %zmm4 -; AVX512F-NEXT: vpmovdb %zmm4, %xmm4 -; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm2 -; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm2 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2 +; AVX512F-NEXT: vpsllw $3, %ymm2, %ymm6 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248] +; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6 +; AVX512F-NEXT: vpsubb %ymm2, %ymm6, %ymm2 +; AVX512F-NEXT: vpsubb %ymm2, %ymm0, %ymm0 +; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero ; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2 -; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2 -; AVX512F-NEXT: vpmovdb %zmm2, %xmm2 -; AVX512F-NEXT: vinserti128 
$1, %xmm2, %ymm4, %ymm2 +; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX512F-NEXT: vpmullw %ymm3, %ymm6, %ymm3 +; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3 +; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm3[2,3],ymm2[2,3] +; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpackuswb %ymm6, %ymm2, %ymm2 +; AVX512F-NEXT: vpsubb %ymm2, %ymm1, %ymm3 +; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3 +; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm3 +; AVX512F-NEXT: vpaddb %ymm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpsrlw $2, %ymm2, %ymm2 +; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2 +; AVX512F-NEXT: vpsllw $3, %ymm2, %ymm3 +; AVX512F-NEXT: vpand %ymm7, %ymm3, %ymm3 +; AVX512F-NEXT: vpsubb %ymm2, %ymm3, %ymm2 ; AVX512F-NEXT: vpsubb %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: retq ; Index: test/CodeGen/X86/vector-mul.ll =================================================================== --- test/CodeGen/X86/vector-mul.ll +++ test/CodeGen/X86/vector-mul.ll @@ -355,13 +355,8 @@ ; ; X64-XOP-LABEL: mul_v16i8_17: ; X64-XOP: # BB#0: -; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm1 -; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17] -; X64-XOP-NEXT: vpmullw %xmm2, %xmm1, %xmm1 -; X64-XOP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm0 -; X64-XOP-NEXT: vpmullw %xmm2, %xmm0, %xmm0 -; X64-XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm1[0,2,4,6,8,10,12,14],xmm0[0,2,4,6,8,10,12,14] +; X64-XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm1 +; X64-XOP-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; X64-XOP-NEXT: retq ; ; X64-AVX2-LABEL: mul_v16i8_17: @@ -490,10 +485,13 @@ ; X64-XOP: # BB#0: ; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm1 ; X64-XOP-NEXT: vpmullw {{.*}}(%rip), %xmm1, %xmm1 +; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] +; X64-XOP-NEXT: vpand %xmm2, %xmm1, %xmm1 ; X64-XOP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] ; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm0 ; X64-XOP-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0 -; X64-XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm1[0,2,4,6,8,10,12,14],xmm0[0,2,4,6,8,10,12,14] +; X64-XOP-NEXT: vpand %xmm2, %xmm0, %xmm0 +; X64-XOP-NEXT: vpackuswb %xmm0, %xmm1, %xmm0 ; X64-XOP-NEXT: retq ; ; X64-AVX2-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3: @@ -628,13 +626,8 @@ ; ; X64-XOP-LABEL: mul_v16i8_31: ; X64-XOP: # BB#0: -; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm1 -; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31] -; X64-XOP-NEXT: vpmullw %xmm2, %xmm1, %xmm1 -; X64-XOP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm0 -; X64-XOP-NEXT: vpmullw %xmm2, %xmm0, %xmm0 -; X64-XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm1[0,2,4,6,8,10,12,14],xmm0[0,2,4,6,8,10,12,14] +; X64-XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm1 +; X64-XOP-NEXT: vpsubb %xmm0, %xmm1, %xmm0 ; X64-XOP-NEXT: retq ; ; X64-AVX2-LABEL: mul_v16i8_31: @@ -996,10 +989,13 @@ ; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm1 ; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,3,7,15,31,63,127] ; X64-XOP-NEXT: vpmullw %xmm2, %xmm1, %xmm1 +; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] +; X64-XOP-NEXT: vpand %xmm3, %xmm1, %xmm1 ; X64-XOP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] ; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm0 ; X64-XOP-NEXT: vpmullw %xmm2, %xmm0, %xmm0 -; X64-XOP-NEXT: vpperm {{.*#+}} xmm0 = 
xmm1[0,2,4,6,8,10,12,14],xmm0[0,2,4,6,8,10,12,14] +; X64-XOP-NEXT: vpand %xmm3, %xmm0, %xmm0 +; X64-XOP-NEXT: vpackuswb %xmm0, %xmm1, %xmm0 ; X64-XOP-NEXT: retq ; ; X64-AVX2-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127: Index: test/CodeGen/X86/vselect-avx.ll =================================================================== --- test/CodeGen/X86/vselect-avx.ll +++ test/CodeGen/X86/vselect-avx.ll @@ -94,7 +94,8 @@ ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7] ; AVX1-NEXT: vpsrld $31, %xmm3, %xmm4 ; AVX1-NEXT: vpaddd %xmm4, %xmm3, %xmm3 -; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm3, %xmm3 +; AVX1-NEXT: vpslld $2, %xmm3, %xmm4 +; AVX1-NEXT: vpsubd %xmm3, %xmm4, %xmm3 ; AVX1-NEXT: vpsubd %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 @@ -116,8 +117,8 @@ ; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3] ; AVX2-NEXT: vpsrld $31, %xmm3, %xmm4 ; AVX2-NEXT: vpaddd %xmm4, %xmm3, %xmm3 -; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [3,3,3,3] -; AVX2-NEXT: vpmulld %xmm4, %xmm3, %xmm3 +; AVX2-NEXT: vpslld $2, %xmm3, %xmm4 +; AVX2-NEXT: vpsubd %xmm3, %xmm4, %xmm3 ; AVX2-NEXT: vpsubd %xmm3, %xmm0, %xmm0 ; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
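Note: the snippet below is an illustrative aside, not part of the patch. All of the CHECK-line churn above follows one pattern: a vector multiply by a splat constant of the form 2^N +/- 1 (7, 3, 17, 31, ...) is now expected to lower to a shift plus an add/sub instead of a vpmull*/vpmulld against a constant-pool splat. A minimal IR reproducer (the function name @mul7_v4i32 is hypothetical, not taken from the tests above):

  define <4 x i32> @mul7_v4i32(<4 x i32> %x) {
    ; 7 = 2^3 - 1, so the combine rewrites this multiply as (x << 3) - x;
    ; on AVX targets this is expected to select to vpslld $3 followed by vpsubd.
    %r = mul <4 x i32> %x, <i32 7, i32 7, i32 7, i32 7>
    ret <4 x i32> %r
  }

The urem-by-7 tests change only in their final multiply: the remainder is still computed as x - 7 * (x / 7) via the magic-number sequence, but the 7 * q step now uses the same shift-and-subtract expansion (with an extra vpand to mask the shifted byte lanes in the i8 cases).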