Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -872,9 +872,6 @@
     // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
-      setOperationPromotedToType(ISD::AND, VT, MVT::v2i64);
-      setOperationPromotedToType(ISD::OR, VT, MVT::v2i64);
-      setOperationPromotedToType(ISD::XOR, VT, MVT::v2i64);
       setOperationPromotedToType(ISD::LOAD, VT, MVT::v2i64);
     }
@@ -1176,9 +1173,6 @@
     // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
-      setOperationPromotedToType(ISD::AND, VT, MVT::v4i64);
-      setOperationPromotedToType(ISD::OR, VT, MVT::v4i64);
-      setOperationPromotedToType(ISD::XOR, VT, MVT::v4i64);
       setOperationPromotedToType(ISD::LOAD, VT, MVT::v4i64);
     }
@@ -1378,13 +1372,6 @@
       setCondCodeAction(ISD::SETLE, VT, Custom);
     }
-    // Need to promote to 64-bit even though we have 32-bit masked instructions
-    // because the IR optimizers rearrange bitcasts around logic ops leaving
-    // too many variations to handle if we don't promote them.
-    setOperationPromotedToType(ISD::AND, MVT::v16i32, MVT::v8i64);
-    setOperationPromotedToType(ISD::OR, MVT::v16i32, MVT::v8i64);
-    setOperationPromotedToType(ISD::XOR, MVT::v16i32, MVT::v8i64);
-
     if (Subtarget.hasDQI()) {
       setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
       setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
@@ -1590,10 +1577,6 @@
       setOperationAction(ISD::UMIN, VT, Legal);
       setOperationAction(ISD::SETCC, VT, Custom);
-      setOperationPromotedToType(ISD::AND, VT, MVT::v8i64);
-      setOperationPromotedToType(ISD::OR, VT, MVT::v8i64);
-      setOperationPromotedToType(ISD::XOR, VT, MVT::v8i64);
-
       // The condition codes aren't legal in SSE/AVX and under AVX512 we use
       // setcc all the way to isel and prefer SETGT in some isel patterns.
       setCondCodeAction(ISD::SETLT, VT, Custom);
@@ -35345,6 +35328,27 @@
   return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0), Setnp);
 }
+// This promotes vector and/or/xor operations to a vXi64 type. We used to do
+// this during op legalization, but DAG combine yields better results.
+// TODO: This is largely just to reduce the number of isel patterns. Maybe we
+// can just add all the patterns or do C++ based selection in X86ISelDAGToDAG?
+static SDValue promoteVecLogicOp(SDNode *N, SelectionDAG &DAG) {
+  MVT VT = N->getSimpleValueType(0);
+
+  if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
+    return SDValue();
+
+  // Already the correct type.
+  if (VT.getVectorElementType() == MVT::i64)
+    return SDValue();
+
+  MVT NewVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
+  SDValue Op0 = DAG.getBitcast(NewVT, N->getOperand(0));
+  SDValue Op1 = DAG.getBitcast(NewVT, N->getOperand(1));
+  return DAG.getBitcast(VT, DAG.getNode(N->getOpcode(), SDLoc(N), NewVT,
+                                        Op0, Op1));
+}
+
 static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI,
                           const X86Subtarget &Subtarget) {
@@ -35379,6 +35383,9 @@
   if (DCI.isBeforeLegalizeOps())
     return SDValue();
 
+  if (SDValue V = promoteVecLogicOp(N, DAG))
+    return V;
+
   if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
     return R;
@@ -35701,6 +35708,9 @@
   if (DCI.isBeforeLegalizeOps())
     return SDValue();
 
+  if (SDValue V = promoteVecLogicOp(N, DAG))
+    return V;
+
   if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
     return R;
@@ -37720,6 +37730,9 @@
   if (DCI.isBeforeLegalizeOps())
     return SDValue();
 
+  if (SDValue V = promoteVecLogicOp(N, DAG))
+    return V;
+
   if (SDValue SetCC = foldXor1SetCC(N, DAG))
     return SetCC;
Index: test/CodeGen/X86/avx-logic.ll
===================================================================
--- test/CodeGen/X86/avx-logic.ll
+++ test/CodeGen/X86/avx-logic.ll
@@ -314,7 +314,7 @@
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1095216660735,1095216660735]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255]
 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0
@@ -342,7 +342,7 @@
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT: vpaddd %xmm3, %xmm4, %xmm3
 ; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1095216660735,1095216660735]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255]
 ; AVX1-NEXT: vpandn %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpandn %xmm1, %xmm3, %xmm1
 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
@@ -450,7 +450,7 @@
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1095216660735,1095216660735]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255]
 ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0
@@ -479,7 +479,7 @@
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1095216660735,1095216660735]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255]
 ; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0
@@ -537,7 +537,7 @@
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [281470681808895,281470681808895]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
 ; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0
@@ -566,7 +566,7 @@
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [281470681808895,281470681808895]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
 ; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0
Index: test/CodeGen/X86/avx512-ext.ll
===================================================================
--- test/CodeGen/X86/avx512-ext.ll
+++ test/CodeGen/X86/avx512-ext.ll
@@ -2157,7 +2157,7 @@
 define <4 x i32> @zext_4xi1_to_4x32(<4 x i8> %x, <4 x i8> %y) #0 {
 ; ALL-LABEL: zext_4xi1_to_4x32:
 ; ALL: # %bb.0:
-; ALL-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; ALL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [255,255,255,255]
 ; ALL-NEXT: vpand %xmm2, %xmm1, %xmm1
 ; ALL-NEXT: vpand %xmm2, %xmm0, %xmm0
 ; ALL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
@@ -2171,7 +2171,7 @@
 define <2 x i64> @zext_2xi1_to_2xi64(<2 x i8> %x, <2 x i8> %y) #0 {
 ; ALL-LABEL: zext_2xi1_to_2xi64:
 ; ALL: # %bb.0:
-; ALL-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; ALL-NEXT: vpbroadcastq {{.*#+}} xmm2 = [255,255]
 ; ALL-NEXT: vpand %xmm2, %xmm1, %xmm1
 ; ALL-NEXT: vpand %xmm2, %xmm0, %xmm0
 ; ALL-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
Index: test/CodeGen/X86/avx512-insert-extract.ll
===================================================================
--- test/CodeGen/X86/avx512-insert-extract.ll
+++ test/CodeGen/X86/avx512-insert-extract.ll
@@ -993,7 +993,6 @@
 ; KNL-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
 ; KNL-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT: vpslld $31, %zmm0, %zmm0
 ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; KNL-NEXT: kshiftrw $2, %k0, %k0
 ; KNL-NEXT: kmovw %k0, %eax
@@ -1023,7 +1022,6 @@
 ; KNL-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
 ; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT: vpslld $31, %zmm0, %zmm0
 ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; KNL-NEXT: kshiftrw $15, %k0, %k0
 ; KNL-NEXT: kmovw %k0, %eax
@@ -1059,7 +1057,6 @@
 ; KNL-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
 ; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
-; KNL-NEXT: vpslld $31, %zmm0, %zmm0
 ; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; KNL-NEXT: kshiftrw $15, %k0, %k0
 ; KNL-NEXT: kmovw %k0, %eax
Index: test/CodeGen/X86/avx512-schedule.ll
===================================================================
--- test/CodeGen/X86/avx512-schedule.ll
+++ test/CodeGen/X86/avx512-schedule.ll
@@ -4711,7 +4711,7 @@
 define <4 x i32> @zext_4xi1_to_4x32(<4 x i8> %x, <4 x i8> %y) #0 {
 ; GENERIC-LABEL: zext_4xi1_to_4x32:
 ; GENERIC: # %bb.0:
-; GENERIC-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] sched: [6:0.50]
+; GENERIC-NEXT: vpbroadcastd {{.*#+}} xmm2 = [255,255,255,255] sched: [7:0.50]
 ; GENERIC-NEXT: vpand %xmm2, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT: vpand %xmm2, %xmm0, %xmm0 # sched: [1:0.33]
 ; GENERIC-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
@@ -4720,7 +4720,7 @@
 ;
 ; SKX-LABEL: zext_4xi1_to_4x32:
 ; SKX: # %bb.0:
-; SKX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] sched: [6:0.50]
+; SKX-NEXT: vpbroadcastd {{.*#+}} xmm2 = [255,255,255,255] sched: [6:0.50]
 ; SKX-NEXT: vpand %xmm2, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT: vpand %xmm2, %xmm0, %xmm0 # sched: [1:0.33]
 ; SKX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
@@ -4734,7 +4734,7 @@
 define <2 x i64> @zext_2xi1_to_2xi64(<2 x i8> %x, <2 x i8> %y) #0 {
 ; GENERIC-LABEL: zext_2xi1_to_2xi64:
 ; GENERIC: # %bb.0:
-; GENERIC-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] sched: [6:0.50]
+; GENERIC-NEXT: vpbroadcastq {{.*#+}} xmm2 = [255,255] sched: [7:0.50]
 ; GENERIC-NEXT: vpand %xmm2, %xmm1, %xmm1 # sched: [1:0.33]
 ; GENERIC-NEXT: vpand %xmm2, %xmm0, %xmm0 # sched: [1:0.33]
 ; GENERIC-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
@@ -4743,7 +4743,7 @@
 ;
 ; SKX-LABEL: zext_2xi1_to_2xi64:
 ; SKX: # %bb.0:
-; SKX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] sched: [6:0.50]
+; SKX-NEXT: vpbroadcastq {{.*#+}} xmm2 = [255,255] sched: [6:0.50]
 ; SKX-NEXT: vpand %xmm2, %xmm1, %xmm1 # sched: [1:0.33]
 ; SKX-NEXT: vpand %xmm2, %xmm0, %xmm0 # sched: [1:0.33]
 ; SKX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
Index: test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
===================================================================
--- test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
+++ test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
@@ -9780,7 +9780,6 @@
 ; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: vzeroupper
@@ -9807,7 +9806,6 @@
 ; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: vzeroupper
@@ -9835,7 +9833,6 @@
 ; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -9866,7 +9863,6 @@
 ; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -9897,7 +9893,6 @@
 ; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: movzwl %ax, %eax
@@ -9925,7 +9920,6 @@
 ; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: movzwl %ax, %eax
@@ -9954,7 +9948,6 @@
 ; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -9985,7 +9978,6 @@
 ; NoVLX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -10017,12 +10009,10 @@
 ; NoVLX-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm1
-; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
 ; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT: kmovw %k0, %ecx
 ; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: shll $16, %eax
@@ -10052,12 +10042,10 @@
 ; NoVLX-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm1
-; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
 ; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT: kmovw %k0, %ecx
 ; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: shll $16, %eax
@@ -10088,14 +10076,12 @@
 ; NoVLX-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm1
-; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
 ; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
 ; NoVLX-NEXT: shrl $16, %edi
 ; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %ecx
 ; NoVLX-NEXT: andl %edi, %ecx
@@ -10130,14 +10116,12 @@
 ; NoVLX-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm1
-; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
 ; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
 ; NoVLX-NEXT: shrl $16, %edi
 ; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %ecx
 ; NoVLX-NEXT: andl %edi, %ecx
@@ -10172,7 +10156,6 @@
 ; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
@@ -10201,7 +10184,6 @@
 ; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
@@ -10231,7 +10213,6 @@
 ; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kmovw %k0, %eax
@@ -10264,7 +10245,6 @@
 ; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kmovw %k0, %eax
@@ -10296,7 +10276,6 @@
 ; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: vzeroupper
@@ -10323,7 +10302,6 @@
 ; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: vzeroupper
@@ -10351,7 +10329,6 @@
 ; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kmovw %k0, %eax
@@ -10382,7 +10359,6 @@
 ; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kmovw %k0, %eax
@@ -10413,7 +10389,6 @@
 ; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: movzwl %ax, %eax
@@ -10441,7 +10416,6 @@
 ; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: movzwl %ax, %eax
@@ -10470,7 +10444,6 @@
 ; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kmovw %k0, %eax
@@ -10502,7 +10475,6 @@
 ; NoVLX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kmovw %k0, %eax
@@ -10535,7 +10507,6 @@
 ; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: vzeroupper
@@ -10563,7 +10534,6 @@
 ; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: vzeroupper
@@ -10592,7 +10562,6 @@
 ; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -10624,7 +10593,6 @@
 ; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -10656,7 +10624,6 @@
 ; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: movzwl %ax, %eax
@@ -10685,7 +10652,6 @@
 ; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: movzwl %ax, %eax
@@ -10715,7 +10681,6 @@
 ; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -10747,7 +10712,6 @@
 ; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -10782,12 +10746,10 @@
 ; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %ecx
 ; NoVLX-NEXT: vpternlogq $15, %zmm2, %zmm2, %zmm2
 ; NoVLX-NEXT: vpmovsxwd %ymm2, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: shll $16, %eax
@@ -10820,12 +10782,10 @@
 ; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %ecx
 ; NoVLX-NEXT: vpternlogq $15, %zmm1, %zmm1, %zmm1
 ; NoVLX-NEXT: vpmovsxwd %ymm1, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: shll $16, %eax
@@ -10856,7 +10816,6 @@
 ; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm2
 ; NoVLX-NEXT: vpternlogq $15, %zmm2, %zmm2, %zmm2
 ; NoVLX-NEXT: vpmovsxwd %ymm2, %zmm2
-; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
 ; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -10866,7 +10825,6 @@
 ; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %ecx
 ; NoVLX-NEXT: andl %edi, %ecx
@@ -10901,7 +10859,6 @@
 ; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm1
 ; NoVLX-NEXT: vpternlogq $15, %zmm1, %zmm1, %zmm1
 ; NoVLX-NEXT: vpmovsxwd %ymm1, %zmm1
-; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
 ; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -10911,7 +10868,6 @@
 ; NoVLX-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %ecx
 ; NoVLX-NEXT: andl %edi, %ecx
@@ -14768,7 +14724,6 @@
 ; NoVLX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: vzeroupper
@@ -14795,7 +14750,6 @@
 ; NoVLX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: vzeroupper
@@ -14824,7 +14778,6 @@
 ; NoVLX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -14855,7 +14808,6 @@
 ; NoVLX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -14887,7 +14839,6 @@
 ; NoVLX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: movzwl %ax, %eax
@@ -14915,7 +14866,6 @@
 ; NoVLX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: movzwl %ax, %eax
@@ -14945,7 +14895,6 @@
 ; NoVLX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -14976,7 +14925,6 @@
 ; NoVLX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -15009,12 +14957,10 @@
 ; NoVLX-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm1
-; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
 ; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT: kmovw %k0, %ecx
 ; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: shll $16, %eax
@@ -15044,12 +14990,10 @@
 ; NoVLX-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm1
-; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
 ; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT: kmovw %k0, %ecx
 ; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: shll $16, %eax
@@ -15081,14 +15025,12 @@
 ; NoVLX-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm1
-; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
 ; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
 ; NoVLX-NEXT: shrl $16, %edi
 ; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %ecx
 ; NoVLX-NEXT: andl %edi, %ecx
@@ -15123,14 +15065,12 @@
 ; NoVLX-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm1
-; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
 ; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
 ; NoVLX-NEXT: shrl $16, %edi
 ; NoVLX-NEXT: vextracti128 $1, %ymm0, %xmm0
 ; NoVLX-NEXT: vpmovsxbd %xmm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %ecx
 ; NoVLX-NEXT: andl %edi, %ecx
@@ -15166,7 +15106,6 @@
 ; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
@@ -15195,7 +15134,6 @@
 ; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: # kill: def $ax killed $ax killed $eax
@@ -15226,7 +15164,6 @@
 ; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kmovw %k0, %eax
@@ -15259,7 +15196,6 @@
 ; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kmovw %k0, %eax
@@ -15292,7 +15228,6 @@
 ; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: vzeroupper
@@ -15319,7 +15254,6 @@
 ; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: vzeroupper
@@ -15348,7 +15282,6 @@
 ; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kmovw %k0, %eax
@@ -15379,7 +15312,6 @@
 ; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kmovw %k0, %eax
@@ -15411,7 +15343,6 @@
 ; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: movzwl %ax, %eax
@@ -15439,7 +15370,6 @@
 ; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: movzwl %ax, %eax
@@ -15469,7 +15399,6 @@
 ; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kmovw %k0, %eax
@@ -15501,7 +15430,6 @@
 ; NoVLX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwq %xmm0, %zmm0
-; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
 ; NoVLX-NEXT: kmovw %edi, %k1
 ; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
 ; NoVLX-NEXT: kmovw %k0, %eax
@@ -15535,7 +15463,6 @@
 ; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: vzeroupper
@@ -15563,7 +15490,6 @@
 ; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: vzeroupper
@@ -15593,7 +15519,6 @@
 ; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -15625,7 +15550,6 @@
 ; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -15658,7 +15582,6 @@
 ; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: movzwl %ax, %eax
@@ -15687,7 +15610,6 @@
 ; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: movzwl %ax, %eax
@@ -15718,7 +15640,6 @@
 ; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -15750,7 +15671,6 @@
 ; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -15785,14 +15705,12 @@
 ; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %ecx
 ; NoVLX-NEXT: vpmaxuw %ymm3, %ymm2, %ymm0
 ; NoVLX-NEXT: vpcmpeqw %ymm0, %ymm2, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: shll $16, %eax
@@ -15823,14 +15741,12 @@
 ; NoVLX-NEXT: vpcmpeqw %ymm2, %ymm0, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %ecx
 ; NoVLX-NEXT: vpmaxuw 32(%rdi), %ymm1, %ymm0
 ; NoVLX-NEXT: vpcmpeqw %ymm0, %ymm1, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: shll $16, %eax
@@ -15862,7 +15778,6 @@
 ; NoVLX-NEXT: vpcmpeqw %ymm2, %ymm0, %ymm2
 ; NoVLX-NEXT: vpternlogq $15, %zmm2, %zmm2, %zmm2
 ; NoVLX-NEXT: vpmovsxwd %ymm2, %zmm2
-; NoVLX-NEXT: vpslld $31, %zmm2, %zmm2
 ; NoVLX-NEXT: vptestmd %zmm2, %zmm2, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -15873,7 +15788,6 @@
 ; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %ecx
 ; NoVLX-NEXT: andl %edi, %ecx
@@ -15908,7 +15822,6 @@
 ; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm1
 ; NoVLX-NEXT: vpternlogq $15, %zmm1, %zmm1, %zmm1
 ; NoVLX-NEXT: vpmovsxwd %ymm1, %zmm1
-; NoVLX-NEXT: vpslld $31, %zmm1, %zmm1
 ; NoVLX-NEXT: vptestmd %zmm1, %zmm1, %k0
 ; NoVLX-NEXT: kmovw %k0, %eax
 ; NoVLX-NEXT: andl %edi, %eax
@@ -15918,7 +15831,6 @@
 ; NoVLX-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
 ; NoVLX-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
 ; NoVLX-NEXT: vpmovsxwd %ymm0, %zmm0
-; NoVLX-NEXT: vpslld $31, %zmm0, %zmm0
 ; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT: kmovw %k0, %ecx
 ; NoVLX-NEXT: andl %edi, %ecx
Index: test/CodeGen/X86/cast-vsel.ll
===================================================================
--- test/CodeGen/X86/cast-vsel.ll
+++ test/CodeGen/X86/cast-vsel.ll
@@ -357,17 +357,16 @@
 ; AVX2-LABEL: example25:
 ; AVX2: # %bb.0: # %vector.ph
 ; AVX2-NEXT: movq $-4096, %rax # imm = 0xF000
-; AVX2-NEXT: vbroadcastss {{.*#+}} ymm0 = [1,1,1,1,1,1,1,1]
 ; AVX2-NEXT: .p2align 4, 0x90
 ; AVX2-NEXT: .LBB5_1: # %vector.body
 ; AVX2-NEXT: # =>This Inner Loop Header: Depth=1
-; AVX2-NEXT: vmovups da+4096(%rax), %ymm1
-; AVX2-NEXT: vcmpltps db+4096(%rax), %ymm1, %ymm1
-; AVX2-NEXT: vmovups dc+4096(%rax), %ymm2
-; AVX2-NEXT: vcmpltps dd+4096(%rax), %ymm2, %ymm2
-; AVX2-NEXT: vandps %ymm0, %ymm2, %ymm2
-; AVX2-NEXT: vandps %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vmovups %ymm1, dj+4096(%rax)
+; AVX2-NEXT: vmovups da+4096(%rax), %ymm0
+; AVX2-NEXT: vcmpltps db+4096(%rax), %ymm0, %ymm0
+; AVX2-NEXT: vmovups dc+4096(%rax), %ymm1
+; AVX2-NEXT: vcmpltps dd+4096(%rax), %ymm1, %ymm1
+; AVX2-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $31, %ymm0, %ymm0
+; AVX2-NEXT: vmovdqu %ymm0, dj+4096(%rax)
 ; AVX2-NEXT: addq $32, %rax
 ; AVX2-NEXT: jne .LBB5_1
 ; AVX2-NEXT: # %bb.2: # %for.end
Index: test/CodeGen/X86/combine-sdiv.ll
===================================================================
--- test/CodeGen/X86/combine-sdiv.ll
+++ test/CodeGen/X86/combine-sdiv.ll
@@ -726,7 +726,8 @@
 ; AVX1-NEXT: vpsraw $1, %xmm3, %xmm3
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4,5,6],xmm3[7]
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [0,65535,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
+; AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [0,65535,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
+; AVX1-NEXT: # ymm2 = mem[0,1,0,1]
 ; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
 ; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0
 ; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
@@ -777,7 +778,9 @@
 ; XOP-NEXT: vpaddw %xmm3, %xmm0, %xmm3
 ; XOP-NEXT: vpshaw %xmm2, %xmm3, %xmm2
 ; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; XOP-NEXT: vpcmov {{.*}}(%rip), %ymm0, %ymm1, %ymm0
+; XOP-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [0,65535,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
+; XOP-NEXT: # ymm2 = mem[0,1,0,1]
+; XOP-NEXT: vpcmov %ymm2, %ymm0, %ymm1, %ymm0
 ; XOP-NEXT: retq
   %1 = sdiv <16 x i16> %x,
   ret <16 x i16> %1
@@ -960,7 +963,8 @@
 ; AVX1-NEXT: vpsraw $1, %xmm5, %xmm5
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2],xmm6[3,4,5,6],xmm5[7]
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2
-; AVX1-NEXT: vmovaps {{.*#+}} ymm5 = [0,65535,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
+; AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm5 = [0,65535,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
+; AVX1-NEXT: # ymm5 = mem[0,1,0,1]
 ; AVX1-NEXT: vandps %ymm5, %ymm2, %ymm2
 ; AVX1-NEXT: vandnps %ymm0, %ymm5, %ymm0
 ; AVX1-NEXT: vorps %ymm0, %ymm2, %ymm0
@@ -1055,7 +1059,8 @@
 ; XOP-NEXT: vpaddw %xmm5, %xmm0, %xmm5
 ; XOP-NEXT: vpshaw %xmm3, %xmm5, %xmm5
 ; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2
-; XOP-NEXT: vmovdqa {{.*#+}} ymm5 = [0,65535,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
+; XOP-NEXT: vbroadcastf128 {{.*#+}} ymm5 = [0,65535,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
+; XOP-NEXT: # ymm5 = mem[0,1,0,1]
 ; XOP-NEXT: vpcmov %ymm5, %ymm0, %ymm2, %ymm0
 ; XOP-NEXT: vextractf128 $1, %ymm1, %xmm2
 ; XOP-NEXT: vpsraw $15, %xmm2, %xmm6
Index: test/CodeGen/X86/combine-srl.ll
===================================================================
--- test/CodeGen/X86/combine-srl.ll
+++ test/CodeGen/X86/combine-srl.ll
@@ -357,55 +357,50 @@
 ; SSE-LABEL: combine_vec_lshr_lzcnt_bit1:
 ; SSE: # %bb.0:
 ; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: pshufb %xmm1, %xmm4
 ; SSE-NEXT: movdqa %xmm0, %xmm1
 ; SSE-NEXT: psrlw $4, %xmm1
-; SSE-NEXT: pand %xmm2, %xmm1
 ; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; SSE-NEXT: movdqa %xmm3, %xmm4
 ; SSE-NEXT: pshufb %xmm1, %xmm3
 ; SSE-NEXT: pcmpeqb %xmm2, %xmm1
-; SSE-NEXT: pand %xmm4, %xmm1
-; SSE-NEXT: paddb %xmm3, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: pcmpeqb %xmm2, %xmm3
-; SSE-NEXT: psrlw $8, %xmm3
-; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE-NEXT: pand %xmm0, %xmm5
+; SSE-NEXT: pshufb %xmm5, %xmm4
+; SSE-NEXT: pand %xmm1, %xmm4
+; SSE-NEXT: paddb %xmm4, %xmm3
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: pcmpeqb %xmm2, %xmm1
 ; SSE-NEXT: psrlw $8, %xmm1
-; SSE-NEXT: paddw %xmm3, %xmm1
+; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: psrlw $8, %xmm3
+; SSE-NEXT: paddw %xmm1, %xmm3
 ; SSE-NEXT: pcmpeqw %xmm2, %xmm0
 ; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: paddd %xmm0, %xmm1
-; SSE-NEXT: psrld $5, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: pand %xmm3, %xmm0
+; SSE-NEXT: psrld $16, %xmm3
+; SSE-NEXT: paddd %xmm3, %xmm0
+; SSE-NEXT: psrld $5, %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: combine_vec_lshr_lzcnt_bit1:
 ; AVX: # %bb.0:
 ; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vpsrlw $4, %xmm0, %xmm4
-; AVX-NEXT: vpand %xmm1, %xmm4, %xmm1
-; AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm5
-; AVX-NEXT: vpand %xmm5, %xmm2, %xmm2
-; AVX-NEXT: vpshufb %xmm1, %xmm3, %xmm1
-; AVX-NEXT: vpaddb %xmm1, %xmm2, %xmm1
-; AVX-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm2
-; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX-NEXT: vpand %xmm2, %xmm1, %xmm2
+; AVX-NEXT: vpsrlw $4, %xmm0, %xmm1
+; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm3
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm4
+; AVX-NEXT: vmovdqa {{.*#+}} xmm5 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX-NEXT: vpshufb %xmm4, %xmm5, %xmm4
+; AVX-NEXT: vpand %xmm3, %xmm4, %xmm3
+; AVX-NEXT: vpshufb %xmm1, %xmm5, %xmm1
+; AVX-NEXT: vpaddb %xmm1, %xmm3, %xmm1
+; AVX-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm3
+; AVX-NEXT: vpsrlw $8, %xmm3, %xmm3
+; AVX-NEXT: vpand %xmm3, %xmm1, %xmm3
 ; AVX-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vpcmpeqw %xmm4, %xmm0, %xmm0
+; AVX-NEXT: vpaddw %xmm3, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm0
 ; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
 ; AVX-NEXT: vpand %xmm0, %xmm1, %xmm0
 ; AVX-NEXT: vpsrld $16, %xmm1, %xmm1
Index: test/CodeGen/X86/gather-addresses.ll
===================================================================
--- test/CodeGen/X86/gather-addresses.ll
+++ test/CodeGen/X86/gather-addresses.ll
@@ -149,11 +149,11 @@
 ; LIN-SSE2-NEXT: andl %ecx, %edx
 ; LIN-SSE2-NEXT: andl %ecx, %esi
 ; LIN-SSE2-NEXT: andl %ecx, %edi
-; LIN-SSE2-NEXT: movd %eax, %xmm0
-; LIN-SSE2-NEXT: movd %edx, %xmm1
+; LIN-SSE2-NEXT: movq %rax, %xmm0
+; LIN-SSE2-NEXT: movq %rdx, %xmm1
 ; LIN-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; LIN-SSE2-NEXT: movd %edi, %xmm2
-; LIN-SSE2-NEXT: movd %esi, %xmm1
+; LIN-SSE2-NEXT: movq %rdi, %xmm2
+; LIN-SSE2-NEXT: movq %rsi, %xmm1
 ; LIN-SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; LIN-SSE2-NEXT: retq
 ;
@@ -169,11 +169,11 @@
 ; LIN-SSE4-NEXT: andl %ecx, %edx
 ; LIN-SSE4-NEXT: andl %ecx, %esi
 ; LIN-SSE4-NEXT: andl %ecx, %edi
-; LIN-SSE4-NEXT: movd %edx, %xmm1
-; LIN-SSE4-NEXT: movd %eax, %xmm0
+; LIN-SSE4-NEXT: movq %rdx, %xmm1
+; LIN-SSE4-NEXT: movq %rax, %xmm0
 ; LIN-SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; LIN-SSE4-NEXT: movd %edi, %xmm2
-; LIN-SSE4-NEXT: movd %esi, %xmm1
+; LIN-SSE4-NEXT: movq %rdi, %xmm2
+; LIN-SSE4-NEXT: movq %rsi, %xmm1
 ; LIN-SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; LIN-SSE4-NEXT: retq
 ;
@@ -192,11 +192,11 @@
 ; WIN-SSE2-NEXT: andl %r9d, %ecx
 ; WIN-SSE2-NEXT: andl %r9d, %r8d
 ; WIN-SSE2-NEXT: andl %r9d, %edx
-; WIN-SSE2-NEXT: movd %eax, %xmm0
-; WIN-SSE2-NEXT: movd %ecx, %xmm1
+; WIN-SSE2-NEXT: movq %rax, %xmm0
+; WIN-SSE2-NEXT: movq %rcx, %xmm1
 ; WIN-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; WIN-SSE2-NEXT: movd %edx, %xmm2
-; WIN-SSE2-NEXT: movd %r8d, %xmm1
+; WIN-SSE2-NEXT: movq %rdx, %xmm2
+; WIN-SSE2-NEXT: movq %r8, %xmm1
 ; WIN-SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; WIN-SSE2-NEXT: retq
 ;
@@ -212,11 +212,11 @@
 ; WIN-SSE4-NEXT: andl %r9d, %ecx
 ; WIN-SSE4-NEXT: andl %r9d, %r8d
 ; WIN-SSE4-NEXT: andl %r9d, %edx
-; WIN-SSE4-NEXT: movd %ecx, %xmm1
-; WIN-SSE4-NEXT: movd %eax, %xmm0
+; WIN-SSE4-NEXT: movq %rcx, %xmm1
+; WIN-SSE4-NEXT: movq %rax, %xmm0
 ; WIN-SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; WIN-SSE4-NEXT: movd %edx, %xmm2
-; WIN-SSE4-NEXT: movd %r8d, %xmm1
+; WIN-SSE4-NEXT: movq %rdx, %xmm2
+; WIN-SSE4-NEXT: movq %r8, %xmm1
 ; WIN-SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; WIN-SSE4-NEXT: retq
 ;
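Note on the test churn: because the and/or/xor promotion now runs as a DAG combine instead of during op legalization, constant mask operands keep their original element type, so constant-pool comments print per-element masks (for example [255,255,255,255] rather than [1095216660735,1095216660735]) and some masks become eligible for broadcast loads, as seen above. A minimal standalone reproducer in the style of these tests; the file, function name, and CHECK pattern below are illustrative expectations, not output generated as part of this patch:

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
; The mask stays a <4 x i32> constant; the logic op itself is still selected
; through the v2i64 patterns after the combine, so either domain's
; instruction may appear.
define <4 x i32> @and_mask_v4i32(<4 x i32> %a) {
; CHECK-LABEL: and_mask_v4i32:
; CHECK: {{vpand|vandps}} {{.*}}(%rip), %xmm0, %xmm0
  %r = and <4 x i32> %a, <i32 255, i32 255, i32 255, i32 255>
  ret <4 x i32> %r
}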
Index: test/CodeGen/X86/horizontal-reduce-umax.ll
===================================================================
--- test/CodeGen/X86/horizontal-reduce-umax.ll
+++ test/CodeGen/X86/horizontal-reduce-umax.ll
@@ -230,15 +230,14 @@
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm0
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; X86-SSE2-NEXT: pxor %xmm2, %xmm1
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; X86-SSE2-NEXT: pxor %xmm2, %xmm1
+; X86-SSE2-NEXT: movdqa %xmm1, %xmm0
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm0
-; X86-SSE2-NEXT: pmaxsw %xmm1, %xmm0
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm0
+; X86-SSE2-NEXT: pmaxsw %xmm1, %xmm0
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X86-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT: psrld $16, %xmm1
-; X86-SSE2-NEXT: pxor %xmm2, %xmm0
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm1
@@ -273,15 +272,14 @@
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm0
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; X64-SSE2-NEXT: pxor %xmm2, %xmm1
-; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; X64-SSE2-NEXT: pxor %xmm2, %xmm1
+; X64-SSE2-NEXT: movdqa %xmm1, %xmm0
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm0
-; X64-SSE2-NEXT: pmaxsw %xmm1, %xmm0
+; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm0
+; X64-SSE2-NEXT: pmaxsw %xmm1, %xmm0
 ; X64-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X64-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT: psrld $16, %xmm1
-; X64-SSE2-NEXT: pxor %xmm2, %xmm0
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm1
@@ -832,20 +830,19 @@
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm0
 ; X86-SSE2-NEXT: pmaxsw %xmm1, %xmm0
-; X86-SSE2-NEXT: pxor %xmm2, %xmm0
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X86-SSE2-NEXT: pxor %xmm2, %xmm0
-; X86-SSE2-NEXT: pxor %xmm2, %xmm1
-; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm1
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm1
+; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
+; X86-SSE2-NEXT: movdqa %xmm1, %xmm0
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm0
-; X86-SSE2-NEXT: pmaxsw %xmm1, %xmm0
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm0
+; X86-SSE2-NEXT: pmaxsw %xmm1, %xmm0
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X86-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT: psrld $16, %xmm1
-; X86-SSE2-NEXT: pxor %xmm2, %xmm0
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm1
@@ -896,20 +893,19 @@
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm0
 ; X64-SSE2-NEXT: pmaxsw %xmm1, %xmm0
-; X64-SSE2-NEXT: pxor %xmm2, %xmm0
-; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X64-SSE2-NEXT: pxor %xmm2, %xmm0
-; X64-SSE2-NEXT: pxor %xmm2, %xmm1
-; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
+; X64-SSE2-NEXT: movdqa %xmm0, %xmm1
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm1
-; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm1
+; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
+; X64-SSE2-NEXT: movdqa %xmm1, %xmm0
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm0
-; X64-SSE2-NEXT: pmaxsw %xmm1, %xmm0
+; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm0
+; X64-SSE2-NEXT: pmaxsw %xmm1, %xmm0
 ; X64-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X64-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT: psrld $16, %xmm1
-; X64-SSE2-NEXT: pxor %xmm2, %xmm0
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm1
@@ -1670,35 +1666,30 @@
 ; X86-SSE2-LABEL: test_reduce_v32i16:
 ; X86-SSE2: ## %bb.0:
 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X86-SSE2-NEXT: pxor %xmm4, %xmm2
-; X86-SSE2-NEXT: pxor %xmm4, %xmm0
-; X86-SSE2-NEXT: pmaxsw %xmm2, %xmm0
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm3
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm1
 ; X86-SSE2-NEXT: pmaxsw %xmm3, %xmm1
-; X86-SSE2-NEXT: movdqa %xmm4, %xmm2
-; X86-SSE2-NEXT: pxor %xmm4, %xmm2
-; X86-SSE2-NEXT: pxor %xmm2, %xmm1
-; X86-SSE2-NEXT: pxor %xmm0, %xmm2
-; X86-SSE2-NEXT: pmaxsw %xmm1, %xmm2
-; X86-SSE2-NEXT: pxor %xmm4, %xmm2
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm2
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm0
 ; X86-SSE2-NEXT: pmaxsw %xmm2, %xmm0
-; X86-SSE2-NEXT: pxor %xmm4, %xmm0
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; X86-SSE2-NEXT: pxor %xmm4, %xmm0
+; X86-SSE2-NEXT: pmaxsw %xmm1, %xmm0
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm1
-; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm1
+; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
 ; X86-SSE2-NEXT: movdqa %xmm1, %xmm0
-; X86-SSE2-NEXT: psrld $16, %xmm0
-; X86-SSE2-NEXT: pxor %xmm4, %xmm1
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm0
-; X86-SSE2-NEXT: pmaxsw %xmm1, %xmm0
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm0
-; X86-SSE2-NEXT: movd %xmm0, %eax
+; X86-SSE2-NEXT: pmaxsw %xmm1, %xmm0
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X86-SSE2-NEXT: pxor %xmm4, %xmm1
+; X86-SSE2-NEXT: psrld $16, %xmm1
+; X86-SSE2-NEXT: pxor %xmm4, %xmm1
+; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
+; X86-SSE2-NEXT: pxor %xmm4, %xmm1
+; X86-SSE2-NEXT: movd %xmm1, %eax
 ; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X86-SSE2-NEXT: retl
 ;
@@ -1748,35 +1739,30 @@
 ; X64-SSE2-LABEL: test_reduce_v32i16:
 ; X64-SSE2: ## %bb.0:
 ; X64-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X64-SSE2-NEXT: pxor %xmm4, %xmm2
-; X64-SSE2-NEXT: pxor %xmm4, %xmm0
-; X64-SSE2-NEXT: pmaxsw %xmm2, %xmm0
 ; X64-SSE2-NEXT: pxor %xmm4, %xmm3
 ; X64-SSE2-NEXT: pxor %xmm4, %xmm1
 ; X64-SSE2-NEXT: pmaxsw %xmm3, %xmm1
-; X64-SSE2-NEXT: movdqa %xmm4, %xmm2
-; X64-SSE2-NEXT: pxor %xmm4, %xmm2
-; X64-SSE2-NEXT: pxor %xmm2, %xmm1
-; X64-SSE2-NEXT: pxor %xmm0, %xmm2
-; X64-SSE2-NEXT: pmaxsw %xmm1, %xmm2
-; X64-SSE2-NEXT: pxor %xmm4, %xmm2
-; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
 ; X64-SSE2-NEXT: pxor %xmm4, %xmm2
 ; X64-SSE2-NEXT: pxor %xmm4, %xmm0
 ; X64-SSE2-NEXT: pmaxsw %xmm2, %xmm0
-; X64-SSE2-NEXT: pxor %xmm4, %xmm0
-; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; X64-SSE2-NEXT: pxor %xmm4, %xmm0
+; X64-SSE2-NEXT: pmaxsw %xmm1, %xmm0
+; X64-SSE2-NEXT: movdqa %xmm0, %xmm1
 ; X64-SSE2-NEXT: pxor %xmm4, %xmm1
-; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
+; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; X64-SSE2-NEXT: pxor %xmm4, %xmm1
+; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
 ; X64-SSE2-NEXT: movdqa %xmm1, %xmm0
-; X64-SSE2-NEXT: psrld $16, %xmm0
-; X64-SSE2-NEXT: pxor %xmm4, %xmm1
 ; X64-SSE2-NEXT: pxor %xmm4, %xmm0
-; X64-SSE2-NEXT: pmaxsw %xmm1, %xmm0
+; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X64-SSE2-NEXT: pxor %xmm4, %xmm0
-; X64-SSE2-NEXT: movd %xmm0, %eax
+; X64-SSE2-NEXT: pmaxsw %xmm1, %xmm0
+; X64-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X64-SSE2-NEXT: pxor %xmm4, %xmm1
+; X64-SSE2-NEXT: psrld $16, %xmm1
+; X64-SSE2-NEXT: pxor %xmm4, %xmm1
+; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
+; X64-SSE2-NEXT: pxor %xmm4, %xmm1
+; X64-SSE2-NEXT: movd %xmm1, %eax
 ; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-SSE2-NEXT: retq
 ;
Index: test/CodeGen/X86/horizontal-reduce-umin.ll
===================================================================
--- test/CodeGen/X86/horizontal-reduce-umin.ll
+++ test/CodeGen/X86/horizontal-reduce-umin.ll
@@ -232,15 +232,14 @@
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm0
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
-; X86-SSE2-NEXT: pxor %xmm2, %xmm1
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; X86-SSE2-NEXT: pxor %xmm2, %xmm1
+; X86-SSE2-NEXT: movdqa %xmm1, %xmm0
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm0
-; X86-SSE2-NEXT: pminsw %xmm1, %xmm0
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm0
+; X86-SSE2-NEXT: pminsw %xmm1, %xmm0
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X86-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT: psrld $16, %xmm1
-; X86-SSE2-NEXT: pxor %xmm2, %xmm0
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm1
@@ -269,15 +268,14 @@
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm0
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
-; X64-SSE2-NEXT: pxor %xmm2, %xmm1
-; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; X64-SSE2-NEXT: pxor %xmm2, %xmm1
+; X64-SSE2-NEXT: movdqa %xmm1, %xmm0
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm0
-; X64-SSE2-NEXT: pminsw %xmm1, %xmm0
+; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm0
+; X64-SSE2-NEXT: pminsw %xmm1, %xmm0
 ; X64-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X64-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT: psrld $16, %xmm1
-; X64-SSE2-NEXT: pxor %xmm2, %xmm0
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm1
@@ -772,20 +770,19 @@
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm0
 ; X86-SSE2-NEXT: pminsw %xmm1, %xmm0
-; X86-SSE2-NEXT: pxor %xmm2, %xmm0
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X86-SSE2-NEXT: pxor %xmm2, %xmm0
-; X86-SSE2-NEXT: pxor %xmm2, %xmm1
-; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm1
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm1
+; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
+; X86-SSE2-NEXT: movdqa %xmm1, %xmm0
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm0
-; X86-SSE2-NEXT: pminsw %xmm1, %xmm0
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm0
+; X86-SSE2-NEXT: pminsw %xmm1, %xmm0
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X86-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT: psrld $16, %xmm1
-; X86-SSE2-NEXT: pxor %xmm2, %xmm0
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm1
@@ -827,20 +824,19 @@
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm0
 ; X64-SSE2-NEXT: pminsw %xmm1, %xmm0
-; X64-SSE2-NEXT: pxor %xmm2, %xmm0
-; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X64-SSE2-NEXT: pxor %xmm2, %xmm0
-; X64-SSE2-NEXT: pxor %xmm2, %xmm1
-; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
+; X64-SSE2-NEXT: movdqa %xmm0, %xmm1
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm1
-; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm1
+; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
+; X64-SSE2-NEXT: movdqa %xmm1, %xmm0
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm0
-; X64-SSE2-NEXT: pminsw %xmm1, %xmm0
+; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm0
+; X64-SSE2-NEXT: pminsw %xmm1, %xmm0
 ; X64-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X64-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT: psrld $16, %xmm1
-; X64-SSE2-NEXT: pxor %xmm2, %xmm0
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm1
 ; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
 ; X64-SSE2-NEXT: pxor %xmm2, %xmm1
@@ -1574,35 +1570,30 @@
 ; X86-SSE2-LABEL: test_reduce_v32i16:
 ; X86-SSE2: ## %bb.0:
 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X86-SSE2-NEXT: pxor %xmm4, %xmm2
-; X86-SSE2-NEXT: pxor %xmm4, %xmm0
-; X86-SSE2-NEXT: pminsw %xmm2, %xmm0
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm3
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm1
 ; X86-SSE2-NEXT: pminsw %xmm3, %xmm1
-; X86-SSE2-NEXT: movdqa %xmm4, %xmm2
-; X86-SSE2-NEXT: pxor %xmm4, %xmm2
-; X86-SSE2-NEXT: pxor %xmm2, %xmm1
-; X86-SSE2-NEXT: pxor %xmm0, %xmm2
-; X86-SSE2-NEXT: pminsw %xmm1, %xmm2
-; X86-SSE2-NEXT: pxor %xmm4, %xmm2
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm2
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm0
 ; X86-SSE2-NEXT: pminsw %xmm2, %xmm0
-; X86-SSE2-NEXT: pxor %xmm4, %xmm0
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; X86-SSE2-NEXT: pxor %xmm4, %xmm0
+; X86-SSE2-NEXT: pminsw %xmm1, %xmm0
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm1
-; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm1
+; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
 ; X86-SSE2-NEXT: movdqa %xmm1, %xmm0
-; X86-SSE2-NEXT: psrld $16, %xmm0
-; X86-SSE2-NEXT: pxor %xmm4, %xmm1
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm0
-; X86-SSE2-NEXT: pminsw %xmm1, %xmm0
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X86-SSE2-NEXT: pxor %xmm4, %xmm0
-; X86-SSE2-NEXT: movd %xmm0, %eax
+; X86-SSE2-NEXT: pminsw %xmm1, %xmm0
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X86-SSE2-NEXT: pxor %xmm4, %xmm1
+; X86-SSE2-NEXT: psrld $16, %xmm1
+; X86-SSE2-NEXT: pxor %xmm4, %xmm1
+; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
+; X86-SSE2-NEXT: pxor %xmm4, %xmm1
+; X86-SSE2-NEXT: movd %xmm1, %eax
 ; X86-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X86-SSE2-NEXT: retl
 ;
@@ -1643,35 +1634,30 @@
 ; X64-SSE2-LABEL: test_reduce_v32i16:
 ; X64-SSE2: ## %bb.0:
 ; X64-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; X64-SSE2-NEXT: pxor %xmm4, %xmm2
-; X64-SSE2-NEXT: pxor %xmm4, %xmm0
-; X64-SSE2-NEXT: pminsw %xmm2, %xmm0
 ; X64-SSE2-NEXT: pxor %xmm4, %xmm3
 ; X64-SSE2-NEXT: pxor %xmm4, %xmm1
 ; X64-SSE2-NEXT: pminsw %xmm3, %xmm1
-; X64-SSE2-NEXT: movdqa %xmm4, %xmm2
-; X64-SSE2-NEXT: pxor %xmm4, %xmm2
-; X64-SSE2-NEXT: pxor %xmm2, %xmm1
-; X64-SSE2-NEXT: pxor %xmm0, %xmm2
-; X64-SSE2-NEXT: pminsw %xmm1, %xmm2
-; X64-SSE2-NEXT: pxor %xmm4, %xmm2
-; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
 ; X64-SSE2-NEXT: pxor %xmm4, %xmm2
 ; X64-SSE2-NEXT: pxor %xmm4, %xmm0
 ; X64-SSE2-NEXT: pminsw %xmm2, %xmm0
-; X64-SSE2-NEXT: pxor %xmm4, %xmm0
-; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; X64-SSE2-NEXT: pxor %xmm4, %xmm0
+; X64-SSE2-NEXT: pminsw %xmm1, %xmm0
+; X64-SSE2-NEXT: movdqa %xmm0, %xmm1
 ; X64-SSE2-NEXT: pxor %xmm4, %xmm1
-; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
+; X64-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
 ; X64-SSE2-NEXT: pxor %xmm4, %xmm1
+; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
 ; X64-SSE2-NEXT: movdqa %xmm1, %xmm0
-; X64-SSE2-NEXT: psrld $16, %xmm0
-; X64-SSE2-NEXT: pxor %xmm4, %xmm1
 ; X64-SSE2-NEXT: pxor %xmm4, %xmm0
-; X64-SSE2-NEXT: pminsw %xmm1, %xmm0
+; X64-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; X64-SSE2-NEXT: pxor %xmm4, %xmm0
-; X64-SSE2-NEXT: movd %xmm0, %eax
+; X64-SSE2-NEXT: pminsw %xmm1, %xmm0
+; X64-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X64-SSE2-NEXT: pxor %xmm4, %xmm1
+; X64-SSE2-NEXT: psrld $16, %xmm1
+; X64-SSE2-NEXT: pxor %xmm4, %xmm1
+; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
+; X64-SSE2-NEXT: pxor %xmm4, %xmm1
+; X64-SSE2-NEXT: movd %xmm1, %eax
 ; X64-SSE2-NEXT: ## kill: def $ax killed $ax killed $eax
 ; X64-SSE2-NEXT: retq
 ;
Index: test/CodeGen/X86/known-bits.ll
===================================================================
--- test/CodeGen/X86/known-bits.ll
+++ test/CodeGen/X86/known-bits.ll
@@ -19,7 +19,7 @@
 ; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: vpxor %xmm0, %xmm0, %xmm0
 ; X32-NEXT: vpinsrd $1, %eax, %xmm0, %xmm1
-; X32-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; X32-NEXT: vbroadcastss {{.*#+}} xmm2 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
 ; X32-NEXT: vpand %xmm2, %xmm1, %xmm1
 ; X32-NEXT: movzbl %cl, %eax
 ; X32-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
@@ -69,7 +69,7 @@
 ; X64-NEXT: movzbl %cl, %ecx
 ; X64-NEXT: vpxor %xmm0, %xmm0, %xmm0
 ; X64-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm1
-; X64-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; X64-NEXT: vbroadcastss {{.*#+}} xmm2 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
 ; X64-NEXT: vpand %xmm2, %xmm1, %xmm1
 ; X64-NEXT: movzbl %al, %eax
 ; X64-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
Index: test/CodeGen/X86/nontemporal-loads.ll
===================================================================
--- test/CodeGen/X86/nontemporal-loads.ll
+++ test/CodeGen/X86/nontemporal-loads.ll
@@ -1800,35 +1800,23 @@
 define <16 x i32> @test_masked_v16i32(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
 ; SSE2-LABEL: test_masked_v16i32:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm10
-; SSE2-NEXT: pxor %xmm12, %xmm12
-; SSE2-NEXT: pcmpeqd %xmm12, %xmm7
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: pxor %xmm0, %xmm8
-; SSE2-NEXT: pcmpeqd %xmm12, %xmm6
-; SSE2-NEXT: movdqa %xmm6, %xmm9
-; SSE2-NEXT: pxor %xmm0, %xmm9
-; SSE2-NEXT: pcmpeqd %xmm12, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm11
-; SSE2-NEXT: pxor %xmm0, %xmm11
-; SSE2-NEXT: pcmpeqd %xmm12, %xmm4
-; SSE2-NEXT: pxor %xmm4, %xmm0
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm7
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm6
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm5
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm0
 ; SSE2-NEXT: pandn (%rdi), %xmm4
-; SSE2-NEXT: pandn %xmm10, %xmm0
 ; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: pand %xmm5, %xmm1
 ; SSE2-NEXT: pandn 16(%rdi), %xmm5
-; SSE2-NEXT: pandn %xmm1, %xmm11
-; SSE2-NEXT: por %xmm5, %xmm11
+; SSE2-NEXT: por %xmm5, %xmm1
+; SSE2-NEXT: pand %xmm6, %xmm2
 ; SSE2-NEXT: pandn 32(%rdi), %xmm6
-; SSE2-NEXT: pandn %xmm2, %xmm9
-; SSE2-NEXT: por %xmm6, %xmm9
+; SSE2-NEXT: por %xmm6, %xmm2
+; SSE2-NEXT: pand %xmm7, %xmm3
 ; SSE2-NEXT: pandn 48(%rdi), %xmm7
-; SSE2-NEXT: pandn %xmm3, %xmm8
-; SSE2-NEXT: por %xmm7, %xmm8
-; SSE2-NEXT: movdqa %xmm11, %xmm1
-; SSE2-NEXT: movdqa %xmm9, %xmm2
-; SSE2-NEXT: movdqa %xmm8, %xmm3
+; SSE2-NEXT: por %xmm7, %xmm3
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: test_masked_v16i32:
Index: test/CodeGen/X86/paddus.ll
===================================================================
--- test/CodeGen/X86/paddus.ll
+++ test/CodeGen/X86/paddus.ll
@@ -801,22 +801,20 @@
 ; SSE2-LABEL: test23:
 ; SSE2: # %bb.0:
 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE2-NEXT: pxor %xmm2, %xmm0
 ; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtw %xmm2, %xmm1
-; SSE2-NEXT: por %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: pcmpgtw %xmm0, %xmm1
+; SSE2-NEXT: por %xmm2, %xmm1
 ; SSE2-NEXT: movdqa %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: test23:
 ; SSSE3: # %bb.0:
 ; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSSE3-NEXT: pxor %xmm2, %xmm0
 ; SSSE3-NEXT: pxor %xmm0, %xmm2
-; SSSE3-NEXT: movdqa %xmm0, %xmm1
-; SSSE3-NEXT: pcmpgtw %xmm2, %xmm1
-; SSSE3-NEXT: por %xmm0, %xmm1
+; SSSE3-NEXT: movdqa %xmm2, %xmm1
+; SSSE3-NEXT: pcmpgtw %xmm0, %xmm1
+; SSSE3-NEXT: por %xmm2, %xmm1
 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
 ; SSSE3-NEXT: retq
 ;
@@ -1029,37 +1027,33 @@
 define <16 x i16> @test29(<16 x i16> %x) {
 ; SSE2-LABEL: test29:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm3
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtw %xmm3, %xmm2
-; SSE2-NEXT: pxor %xmm0, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pcmpgtw %xmm4, %xmm3
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: pxor %xmm3, %xmm4
+; SSE2-NEXT: pxor %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: pcmpgtw %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm4, %xmm1
+; SSE2-NEXT: pcmpgtw %xmm0, %xmm1
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm0
 ; SSE2-NEXT: movdqa %xmm2, %xmm1
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: test29:
 ; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSSE3-NEXT: pxor %xmm4, %xmm0
-; SSSE3-NEXT: pxor %xmm4, %xmm1
-; SSSE3-NEXT: movdqa %xmm1, %xmm3
-; SSSE3-NEXT: pxor %xmm4, %xmm3
-; SSSE3-NEXT: movdqa %xmm1, %xmm2
-; SSSE3-NEXT: pcmpgtw %xmm3, %xmm2
-; SSSE3-NEXT: pxor %xmm0, %xmm4
-; SSSE3-NEXT: movdqa %xmm0, %xmm3
-; SSSE3-NEXT: pcmpgtw %xmm4, %xmm3
-; SSSE3-NEXT: por %xmm0, %xmm3
-; SSSE3-NEXT: por %xmm1, %xmm2
-; SSSE3-NEXT: movdqa %xmm3, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; SSSE3-NEXT: movdqa %xmm0, %xmm4
+; SSSE3-NEXT: pxor %xmm3, %xmm4
+; SSSE3-NEXT: pxor %xmm1, %xmm3
+; SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSSE3-NEXT: pcmpgtw %xmm1, %xmm2
+; SSSE3-NEXT: movdqa %xmm4, %xmm1
+; SSSE3-NEXT: pcmpgtw %xmm0, %xmm1
+; SSSE3-NEXT: por %xmm4, %xmm1
+; SSSE3-NEXT: por %xmm3, %xmm2
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
 ; SSSE3-NEXT: movdqa %xmm2, %xmm1
 ; SSSE3-NEXT: retq
 ;
@@ -1343,66 +1337,58 @@
 define <32 x i16> @test35(<32 x i16> %x) {
 ; SSE2-LABEL: test35:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT:
SSE2-NEXT: movdqa %xmm8, %xmm3 +; SSE2-NEXT: por %xmm7, %xmm3 ; SSE2-NEXT: retq ; ; SSE41-LABEL: test_masked_v16i32: Index: test/CodeGen/X86/paddus.ll =================================================================== --- test/CodeGen/X86/paddus.ll +++ test/CodeGen/X86/paddus.ll @@ -801,22 +801,20 @@ ; SSE2-LABEL: test23: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768] -; SSE2-NEXT: pxor %xmm2, %xmm0 ; SSE2-NEXT: pxor %xmm0, %xmm2 -; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: pcmpgtw %xmm2, %xmm1 -; SSE2-NEXT: por %xmm0, %xmm1 +; SSE2-NEXT: movdqa %xmm2, %xmm1 +; SSE2-NEXT: pcmpgtw %xmm0, %xmm1 +; SSE2-NEXT: por %xmm2, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; SSSE3-LABEL: test23: ; SSSE3: # %bb.0: ; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768] -; SSSE3-NEXT: pxor %xmm2, %xmm0 ; SSSE3-NEXT: pxor %xmm0, %xmm2 -; SSSE3-NEXT: movdqa %xmm0, %xmm1 -; SSSE3-NEXT: pcmpgtw %xmm2, %xmm1 -; SSSE3-NEXT: por %xmm0, %xmm1 +; SSSE3-NEXT: movdqa %xmm2, %xmm1 +; SSSE3-NEXT: pcmpgtw %xmm0, %xmm1 +; SSSE3-NEXT: por %xmm2, %xmm1 ; SSSE3-NEXT: movdqa %xmm1, %xmm0 ; SSSE3-NEXT: retq ; @@ -1029,37 +1027,33 @@ define <16 x i16> @test29(<16 x i16> %x) { ; SSE2-LABEL: test29: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768] -; SSE2-NEXT: pxor %xmm4, %xmm0 -; SSE2-NEXT: pxor %xmm4, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm3 -; SSE2-NEXT: pxor %xmm4, %xmm3 -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: pcmpgtw %xmm3, %xmm2 -; SSE2-NEXT: pxor %xmm0, %xmm4 -; SSE2-NEXT: movdqa %xmm0, %xmm3 -; SSE2-NEXT: pcmpgtw %xmm4, %xmm3 -; SSE2-NEXT: por %xmm0, %xmm3 -; SSE2-NEXT: por %xmm1, %xmm2 -; SSE2-NEXT: movdqa %xmm3, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [32768,32768,32768,32768,32768,32768,32768,32768] +; SSE2-NEXT: movdqa %xmm0, %xmm4 +; SSE2-NEXT: pxor %xmm3, %xmm4 +; SSE2-NEXT: pxor %xmm1, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm2 +; SSE2-NEXT: pcmpgtw %xmm1, %xmm2 +; SSE2-NEXT: movdqa %xmm4, %xmm1 +; SSE2-NEXT: pcmpgtw %xmm0, %xmm1 +; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: por %xmm3, %xmm2 +; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: movdqa %xmm2, %xmm1 ; SSE2-NEXT: retq ; ; SSSE3-LABEL: test29: ; SSSE3: # %bb.0: -; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768] -; SSSE3-NEXT: pxor %xmm4, %xmm0 -; SSSE3-NEXT: pxor %xmm4, %xmm1 -; SSSE3-NEXT: movdqa %xmm1, %xmm3 -; SSSE3-NEXT: pxor %xmm4, %xmm3 -; SSSE3-NEXT: movdqa %xmm1, %xmm2 -; SSSE3-NEXT: pcmpgtw %xmm3, %xmm2 -; SSSE3-NEXT: pxor %xmm0, %xmm4 -; SSSE3-NEXT: movdqa %xmm0, %xmm3 -; SSSE3-NEXT: pcmpgtw %xmm4, %xmm3 -; SSSE3-NEXT: por %xmm0, %xmm3 -; SSSE3-NEXT: por %xmm1, %xmm2 -; SSSE3-NEXT: movdqa %xmm3, %xmm0 +; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [32768,32768,32768,32768,32768,32768,32768,32768] +; SSSE3-NEXT: movdqa %xmm0, %xmm4 +; SSSE3-NEXT: pxor %xmm3, %xmm4 +; SSSE3-NEXT: pxor %xmm1, %xmm3 +; SSSE3-NEXT: movdqa %xmm3, %xmm2 +; SSSE3-NEXT: pcmpgtw %xmm1, %xmm2 +; SSSE3-NEXT: movdqa %xmm4, %xmm1 +; SSSE3-NEXT: pcmpgtw %xmm0, %xmm1 +; SSSE3-NEXT: por %xmm4, %xmm1 +; SSSE3-NEXT: por %xmm3, %xmm2 +; SSSE3-NEXT: movdqa %xmm1, %xmm0 ; SSSE3-NEXT: movdqa %xmm2, %xmm1 ; SSSE3-NEXT: retq ; @@ -1343,66 +1337,58 @@ define <32 x i16> @test35(<32 x i16> %x) { ; SSE2-LABEL: test35: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768] -; SSE2-NEXT: pxor %xmm4, %xmm0 -; SSE2-NEXT: pxor %xmm4, %xmm1 -; SSE2-NEXT: 
pxor %xmm4, %xmm2 -; SSE2-NEXT: pxor %xmm4, %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm5 -; SSE2-NEXT: pxor %xmm4, %xmm5 -; SSE2-NEXT: movdqa %xmm3, %xmm8 -; SSE2-NEXT: pcmpgtw %xmm5, %xmm8 -; SSE2-NEXT: movdqa %xmm2, %xmm6 -; SSE2-NEXT: pxor %xmm4, %xmm6 -; SSE2-NEXT: movdqa %xmm2, %xmm5 -; SSE2-NEXT: pcmpgtw %xmm6, %xmm5 +; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [32768,32768,32768,32768,32768,32768,32768,32768] +; SSE2-NEXT: movdqa %xmm0, %xmm6 +; SSE2-NEXT: pxor %xmm5, %xmm6 ; SSE2-NEXT: movdqa %xmm1, %xmm7 -; SSE2-NEXT: pxor %xmm4, %xmm7 -; SSE2-NEXT: movdqa %xmm1, %xmm6 -; SSE2-NEXT: pcmpgtw %xmm7, %xmm6 -; SSE2-NEXT: pxor %xmm0, %xmm4 -; SSE2-NEXT: movdqa %xmm0, %xmm7 -; SSE2-NEXT: pcmpgtw %xmm4, %xmm7 -; SSE2-NEXT: por %xmm0, %xmm7 -; SSE2-NEXT: por %xmm1, %xmm6 -; SSE2-NEXT: por %xmm2, %xmm5 -; SSE2-NEXT: por %xmm3, %xmm8 -; SSE2-NEXT: movdqa %xmm7, %xmm0 -; SSE2-NEXT: movdqa %xmm6, %xmm1 -; SSE2-NEXT: movdqa %xmm5, %xmm2 +; SSE2-NEXT: pxor %xmm5, %xmm7 +; SSE2-NEXT: movdqa %xmm2, %xmm8 +; SSE2-NEXT: pxor %xmm5, %xmm8 +; SSE2-NEXT: pxor %xmm3, %xmm5 +; SSE2-NEXT: movdqa %xmm5, %xmm4 +; SSE2-NEXT: pcmpgtw %xmm3, %xmm4 ; SSE2-NEXT: movdqa %xmm8, %xmm3 +; SSE2-NEXT: pcmpgtw %xmm2, %xmm3 +; SSE2-NEXT: movdqa %xmm7, %xmm2 +; SSE2-NEXT: pcmpgtw %xmm1, %xmm2 +; SSE2-NEXT: movdqa %xmm6, %xmm1 +; SSE2-NEXT: pcmpgtw %xmm0, %xmm1 +; SSE2-NEXT: por %xmm6, %xmm1 +; SSE2-NEXT: por %xmm7, %xmm2 +; SSE2-NEXT: por %xmm8, %xmm3 +; SSE2-NEXT: por %xmm5, %xmm4 +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: movdqa %xmm2, %xmm1 +; SSE2-NEXT: movdqa %xmm3, %xmm2 +; SSE2-NEXT: movdqa %xmm4, %xmm3 ; SSE2-NEXT: retq ; ; SSSE3-LABEL: test35: ; SSSE3: # %bb.0: -; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768] -; SSSE3-NEXT: pxor %xmm4, %xmm0 -; SSSE3-NEXT: pxor %xmm4, %xmm1 -; SSSE3-NEXT: pxor %xmm4, %xmm2 -; SSSE3-NEXT: pxor %xmm4, %xmm3 -; SSSE3-NEXT: movdqa %xmm3, %xmm5 -; SSSE3-NEXT: pxor %xmm4, %xmm5 -; SSSE3-NEXT: movdqa %xmm3, %xmm8 -; SSSE3-NEXT: pcmpgtw %xmm5, %xmm8 -; SSSE3-NEXT: movdqa %xmm2, %xmm6 -; SSSE3-NEXT: pxor %xmm4, %xmm6 -; SSSE3-NEXT: movdqa %xmm2, %xmm5 -; SSSE3-NEXT: pcmpgtw %xmm6, %xmm5 +; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [32768,32768,32768,32768,32768,32768,32768,32768] +; SSSE3-NEXT: movdqa %xmm0, %xmm6 +; SSSE3-NEXT: pxor %xmm5, %xmm6 ; SSSE3-NEXT: movdqa %xmm1, %xmm7 -; SSSE3-NEXT: pxor %xmm4, %xmm7 -; SSSE3-NEXT: movdqa %xmm1, %xmm6 -; SSSE3-NEXT: pcmpgtw %xmm7, %xmm6 -; SSSE3-NEXT: pxor %xmm0, %xmm4 -; SSSE3-NEXT: movdqa %xmm0, %xmm7 -; SSSE3-NEXT: pcmpgtw %xmm4, %xmm7 -; SSSE3-NEXT: por %xmm0, %xmm7 -; SSSE3-NEXT: por %xmm1, %xmm6 -; SSSE3-NEXT: por %xmm2, %xmm5 -; SSSE3-NEXT: por %xmm3, %xmm8 -; SSSE3-NEXT: movdqa %xmm7, %xmm0 -; SSSE3-NEXT: movdqa %xmm6, %xmm1 -; SSSE3-NEXT: movdqa %xmm5, %xmm2 +; SSSE3-NEXT: pxor %xmm5, %xmm7 +; SSSE3-NEXT: movdqa %xmm2, %xmm8 +; SSSE3-NEXT: pxor %xmm5, %xmm8 +; SSSE3-NEXT: pxor %xmm3, %xmm5 +; SSSE3-NEXT: movdqa %xmm5, %xmm4 +; SSSE3-NEXT: pcmpgtw %xmm3, %xmm4 ; SSSE3-NEXT: movdqa %xmm8, %xmm3 +; SSSE3-NEXT: pcmpgtw %xmm2, %xmm3 +; SSSE3-NEXT: movdqa %xmm7, %xmm2 +; SSSE3-NEXT: pcmpgtw %xmm1, %xmm2 +; SSSE3-NEXT: movdqa %xmm6, %xmm1 +; SSSE3-NEXT: pcmpgtw %xmm0, %xmm1 +; SSSE3-NEXT: por %xmm6, %xmm1 +; SSSE3-NEXT: por %xmm7, %xmm2 +; SSSE3-NEXT: por %xmm8, %xmm3 +; SSSE3-NEXT: por %xmm5, %xmm4 +; SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSSE3-NEXT: movdqa %xmm2, %xmm1 +; SSSE3-NEXT: movdqa %xmm3, %xmm2 +; SSSE3-NEXT: movdqa %xmm4, %xmm3 ; SSSE3-NEXT: retq ; ; SSE41-LABEL: test35: Index: test/CodeGen/X86/psubus.ll 
=================================================================== --- test/CodeGen/X86/psubus.ll +++ test/CodeGen/X86/psubus.ll @@ -792,7 +792,7 @@ ; AVX1-NEXT: vpsubd %xmm9, %xmm1, %xmm1 ; AVX1-NEXT: vpsubd %xmm11, %xmm2, %xmm2 ; AVX1-NEXT: vpsubd %xmm0, %xmm6, %xmm0 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm5 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43] ; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm0, %xmm2, %xmm0 @@ -1465,15 +1465,17 @@ ; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [2147549183,2147549183,2147549183,2147549183] ; SSSE3-NEXT: movdqa %xmm6, %xmm7 ; SSSE3-NEXT: pcmpgtd %xmm5, %xmm7 -; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535] ; SSSE3-NEXT: pand %xmm7, %xmm2 -; SSSE3-NEXT: pandn %xmm5, %xmm7 +; SSSE3-NEXT: pcmpeqd %xmm5, %xmm5 +; SSSE3-NEXT: pxor %xmm5, %xmm7 +; SSSE3-NEXT: psrld $16, %xmm7 ; SSSE3-NEXT: por %xmm2, %xmm7 ; SSSE3-NEXT: pshufb %xmm3, %xmm7 ; SSSE3-NEXT: pxor %xmm1, %xmm4 ; SSSE3-NEXT: pcmpgtd %xmm4, %xmm6 ; SSSE3-NEXT: pand %xmm6, %xmm1 -; SSSE3-NEXT: pandn %xmm5, %xmm6 +; SSSE3-NEXT: pxor %xmm5, %xmm6 +; SSSE3-NEXT: psrld $16, %xmm6 ; SSSE3-NEXT: por %xmm1, %xmm6 ; SSSE3-NEXT: pshufb %xmm3, %xmm6 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0] @@ -2044,15 +2046,17 @@ ; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [2147549183,2147549183,2147549183,2147549183] ; SSSE3-NEXT: movdqa %xmm6, %xmm7 ; SSSE3-NEXT: pcmpgtd %xmm5, %xmm7 -; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535] ; SSSE3-NEXT: pand %xmm7, %xmm2 -; SSSE3-NEXT: pandn %xmm5, %xmm7 +; SSSE3-NEXT: pcmpeqd %xmm5, %xmm5 +; SSSE3-NEXT: pxor %xmm5, %xmm7 +; SSSE3-NEXT: psrld $16, %xmm7 ; SSSE3-NEXT: por %xmm2, %xmm7 ; SSSE3-NEXT: pshufb %xmm3, %xmm7 ; SSSE3-NEXT: pxor %xmm1, %xmm4 ; SSSE3-NEXT: pcmpgtd %xmm4, %xmm6 ; SSSE3-NEXT: pand %xmm6, %xmm1 -; SSSE3-NEXT: pandn %xmm5, %xmm6 +; SSSE3-NEXT: pxor %xmm5, %xmm6 +; SSSE3-NEXT: psrld $16, %xmm6 ; SSSE3-NEXT: por %xmm1, %xmm6 ; SSSE3-NEXT: pshufb %xmm3, %xmm6 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0] @@ -2148,15 +2152,17 @@ ; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [2147549183,2147549183,2147549183,2147549183] ; SSSE3-NEXT: movdqa %xmm6, %xmm7 ; SSSE3-NEXT: pcmpgtd %xmm5, %xmm7 -; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535] ; SSSE3-NEXT: pand %xmm7, %xmm2 -; SSSE3-NEXT: pandn %xmm5, %xmm7 +; SSSE3-NEXT: pcmpeqd %xmm5, %xmm5 +; SSSE3-NEXT: pxor %xmm5, %xmm7 +; SSSE3-NEXT: psrld $16, %xmm7 ; SSSE3-NEXT: por %xmm2, %xmm7 ; SSSE3-NEXT: pshufb %xmm3, %xmm7 ; SSSE3-NEXT: pxor %xmm1, %xmm4 ; SSSE3-NEXT: pcmpgtd %xmm4, %xmm6 ; SSSE3-NEXT: pand %xmm6, %xmm1 -; SSSE3-NEXT: pandn %xmm5, %xmm6 +; SSSE3-NEXT: pxor %xmm5, %xmm6 +; SSSE3-NEXT: psrld $16, %xmm6 ; SSSE3-NEXT: por %xmm1, %xmm6 ; SSSE3-NEXT: pshufb %xmm3, %xmm6 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0] Index: test/CodeGen/X86/sat-add.ll =================================================================== --- test/CodeGen/X86/sat-add.ll +++ test/CodeGen/X86/sat-add.ll @@ -679,13 +679,12 @@ define <8 x i16> @unsigned_sat_variable_v8i16_using_min(<8 x i16> %x, <8 x i16> %y) { ; SSE2-LABEL: unsigned_sat_variable_v8i16_using_min: ; SSE2: # %bb.0: -; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [32768,32768,32768,32768,32768,32768,32768,32768] -; SSE2-NEXT: pxor %xmm3, %xmm0 -; SSE2-NEXT: pxor %xmm3, %xmm2 -; SSE2-NEXT: pxor %xmm1, %xmm2 -; SSE2-NEXT: pminsw %xmm2, %xmm0 -; 
SSE2-NEXT: pxor %xmm3, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768] +; SSE2-NEXT: pxor %xmm2, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [32767,32767,32767,32767,32767,32767,32767,32767] +; SSE2-NEXT: pxor %xmm1, %xmm3 +; SSE2-NEXT: pminsw %xmm3, %xmm0 +; SSE2-NEXT: pxor %xmm2, %xmm0 ; SSE2-NEXT: paddw %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -717,15 +716,12 @@ define <8 x i16> @unsigned_sat_variable_v8i16_using_cmp_notval(<8 x i16> %x, <8 x i16> %y) { ; SSE2-LABEL: unsigned_sat_variable_v8i16_using_cmp_notval: ; SSE2: # %bb.0: -; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE2-NEXT: movdqa %xmm0, %xmm3 -; SSE2-NEXT: paddw %xmm1, %xmm3 -; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768] -; SSE2-NEXT: pxor %xmm4, %xmm0 -; SSE2-NEXT: pxor %xmm4, %xmm2 -; SSE2-NEXT: pxor %xmm1, %xmm2 -; SSE2-NEXT: pcmpgtw %xmm2, %xmm0 -; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: paddw %xmm1, %xmm2 +; SSE2-NEXT: pxor {{.*}}(%rip), %xmm1 +; SSE2-NEXT: pxor {{.*}}(%rip), %xmm0 +; SSE2-NEXT: pcmpgtw %xmm1, %xmm0 +; SSE2-NEXT: por %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: unsigned_sat_variable_v8i16_using_cmp_notval: @@ -750,17 +746,15 @@ ; SSE2-LABEL: unsigned_sat_variable_v4i32_using_min: ; SSE2: # %bb.0: ; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm1, %xmm2 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648] -; SSE2-NEXT: movdqa %xmm0, %xmm4 -; SSE2-NEXT: pxor %xmm3, %xmm4 -; SSE2-NEXT: pxor %xmm2, %xmm3 -; SSE2-NEXT: pxor %xmm1, %xmm3 -; SSE2-NEXT: pcmpgtd %xmm4, %xmm3 -; SSE2-NEXT: pand %xmm3, %xmm0 -; SSE2-NEXT: pxor %xmm2, %xmm3 -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: pandn %xmm3, %xmm2 -; SSE2-NEXT: por %xmm2, %xmm0 +; SSE2-NEXT: pxor %xmm0, %xmm3 +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483647,2147483647,2147483647,2147483647] +; SSE2-NEXT: pxor %xmm1, %xmm4 +; SSE2-NEXT: pcmpgtd %xmm3, %xmm4 +; SSE2-NEXT: pand %xmm4, %xmm0 +; SSE2-NEXT: pandn %xmm2, %xmm4 +; SSE2-NEXT: por %xmm4, %xmm0 ; SSE2-NEXT: paddd %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -809,15 +803,12 @@ define <4 x i32> @unsigned_sat_variable_v4i32_using_cmp_notval(<4 x i32> %x, <4 x i32> %y) { ; SSE2-LABEL: unsigned_sat_variable_v4i32_using_cmp_notval: ; SSE2: # %bb.0: -; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE2-NEXT: movdqa %xmm0, %xmm3 -; SSE2-NEXT: paddd %xmm1, %xmm3 -; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648] -; SSE2-NEXT: pxor %xmm4, %xmm0 -; SSE2-NEXT: pxor %xmm4, %xmm2 -; SSE2-NEXT: pxor %xmm1, %xmm2 -; SSE2-NEXT: pcmpgtd %xmm2, %xmm0 -; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: paddd %xmm1, %xmm2 +; SSE2-NEXT: pxor {{.*}}(%rip), %xmm1 +; SSE2-NEXT: pxor {{.*}}(%rip), %xmm0 +; SSE2-NEXT: pcmpgtd %xmm1, %xmm0 +; SSE2-NEXT: por %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: unsigned_sat_variable_v4i32_using_cmp_notval: Index: test/CodeGen/X86/setcc-lowering.ll =================================================================== --- test/CodeGen/X86/setcc-lowering.ll +++ test/CodeGen/X86/setcc-lowering.ll @@ -45,19 +45,17 @@ ; AVX-LABEL: pr26232: ; AVX: # %bb.0: # %allocas ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] ; AVX-NEXT: .p2align 4, 0x90 ; AVX-NEXT: .LBB1_1: # %for_loop599 ; AVX-NEXT: # =>This Inner Loop Header: Depth=1 ; AVX-NEXT: xorl %eax, %eax ; AVX-NEXT: cmpq $65536, %rdi # imm = 0x10000 ; AVX-NEXT: setl %al 
-; AVX-NEXT: vmovd %eax, %xmm3 -; AVX-NEXT: vpshufb %xmm1, %xmm3, %xmm3 -; AVX-NEXT: vpand %xmm0, %xmm3, %xmm3 -; AVX-NEXT: vpsllw $7, %xmm3, %xmm3 -; AVX-NEXT: vpand %xmm2, %xmm3, %xmm3 -; AVX-NEXT: vpmovmskb %xmm3, %eax +; AVX-NEXT: vmovd %eax, %xmm2 +; AVX-NEXT: vpshufb %xmm1, %xmm2, %xmm2 +; AVX-NEXT: vpand %xmm0, %xmm2, %xmm2 +; AVX-NEXT: vpsllw $7, %xmm2, %xmm2 +; AVX-NEXT: vpmovmskb %xmm2, %eax ; AVX-NEXT: testw %ax, %ax ; AVX-NEXT: jne .LBB1_1 ; AVX-NEXT: # %bb.2: # %for_exit600 Index: test/CodeGen/X86/sse2-intrinsics-canonical.ll =================================================================== --- test/CodeGen/X86/sse2-intrinsics-canonical.ll +++ test/CodeGen/X86/sse2-intrinsics-canonical.ll @@ -198,9 +198,9 @@ ; ; AVX2-LABEL: test_x86_sse2_psubus_b_64: ; AVX2: ## %bb.0: -; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0] -; AVX2-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x15,A,A,A,A] -; AVX2-NEXT: ## fixup A - offset: 4, value: LCPI6_0, kind: FK_Data_4 +; AVX2-NEXT: vpbroadcastw {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] +; AVX2-NEXT: ## encoding: [0xc4,0xe2,0x79,0x79,0x15,A,A,A,A] +; AVX2-NEXT: ## fixup A - offset: 5, value: LCPI6_0, kind: FK_Data_4 ; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm3 ## encoding: [0xc5,0xf1,0xdb,0xda] ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdb,0xc2] ; AVX2-NEXT: vpmaxuw %xmm3, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x3e,0xc3] @@ -209,9 +209,9 @@ ; ; SKX-LABEL: test_x86_sse2_psubus_b_64: ; SKX: ## %bb.0: -; SKX-NEXT: vmovdqa LCPI6_0, %xmm2 ## EVEX TO VEX Compression xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0] -; SKX-NEXT: ## encoding: [0xc5,0xf9,0x6f,0x15,A,A,A,A] -; SKX-NEXT: ## fixup A - offset: 4, value: LCPI6_0, kind: FK_Data_4 +; SKX-NEXT: vpbroadcastw LCPI6_0, %xmm2 ## EVEX TO VEX Compression xmm2 = [255,255,255,255,255,255,255,255] +; SKX-NEXT: ## encoding: [0xc4,0xe2,0x79,0x79,0x15,A,A,A,A] +; SKX-NEXT: ## fixup A - offset: 5, value: LCPI6_0, kind: FK_Data_4 ; SKX-NEXT: vpand %xmm2, %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xdb,0xda] ; SKX-NEXT: vpand %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdb,0xc2] ; SKX-NEXT: vpmaxuw %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x3e,0xc3] Index: test/CodeGen/X86/unfold-masked-merge-vector-variablemask-const.ll =================================================================== --- test/CodeGen/X86/unfold-masked-merge-vector-variablemask-const.ll +++ test/CodeGen/X86/unfold-masked-merge-vector-variablemask-const.ll @@ -132,9 +132,9 @@ ; ; CHECK-SSE2-LABEL: in_constant_varx_mone_invmask: ; CHECK-SSE2: # %bb.0: -; CHECK-SSE2-NEXT: movdqa (%rdi), %xmm0 +; CHECK-SSE2-NEXT: movdqa (%rdx), %xmm0 ; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm1 -; CHECK-SSE2-NEXT: movdqa (%rdx), %xmm2 +; CHECK-SSE2-NEXT: movdqa (%rdi), %xmm2 ; CHECK-SSE2-NEXT: pxor %xmm1, %xmm2 ; CHECK-SSE2-NEXT: pandn %xmm2, %xmm0 ; CHECK-SSE2-NEXT: pxor %xmm1, %xmm0 @@ -142,9 +142,9 @@ ; ; CHECK-XOP-LABEL: in_constant_varx_mone_invmask: ; CHECK-XOP: # %bb.0: -; CHECK-XOP-NEXT: vmovdqa (%rdi), %xmm0 +; CHECK-XOP-NEXT: vmovdqa (%rdx), %xmm0 ; CHECK-XOP-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 -; CHECK-XOP-NEXT: vpxor (%rdx), %xmm1, %xmm2 +; CHECK-XOP-NEXT: vpxor (%rdi), %xmm1, %xmm2 ; CHECK-XOP-NEXT: vpandn %xmm2, %xmm0, %xmm0 ; CHECK-XOP-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; CHECK-XOP-NEXT: retq Index: test/CodeGen/X86/v8i1-masks.ll =================================================================== --- 
test/CodeGen/X86/v8i1-masks.ll +++ test/CodeGen/X86/v8i1-masks.ll @@ -44,10 +44,9 @@ ; X32-AVX2-NEXT: vcmpltps %ymm0, %ymm1, %ymm1 ; X32-AVX2-NEXT: vmovups (%eax), %ymm2 ; X32-AVX2-NEXT: vcmpltps %ymm0, %ymm2, %ymm0 -; X32-AVX2-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] -; X32-AVX2-NEXT: vandps %ymm2, %ymm1, %ymm1 ; X32-AVX2-NEXT: vandps %ymm1, %ymm0, %ymm0 -; X32-AVX2-NEXT: vmovaps %ymm0, (%eax) +; X32-AVX2-NEXT: vpsrld $31, %ymm0, %ymm0 +; X32-AVX2-NEXT: vmovdqa %ymm0, (%eax) ; X32-AVX2-NEXT: vzeroupper ; X32-AVX2-NEXT: retl ; @@ -58,10 +57,9 @@ ; X64-AVX2-NEXT: vcmpltps %ymm0, %ymm1, %ymm1 ; X64-AVX2-NEXT: vmovups (%rdx), %ymm2 ; X64-AVX2-NEXT: vcmpltps %ymm0, %ymm2, %ymm0 -; X64-AVX2-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] -; X64-AVX2-NEXT: vandps %ymm2, %ymm1, %ymm1 ; X64-AVX2-NEXT: vandps %ymm1, %ymm0, %ymm0 -; X64-AVX2-NEXT: vmovaps %ymm0, (%rax) +; X64-AVX2-NEXT: vpsrld $31, %ymm0, %ymm0 +; X64-AVX2-NEXT: vmovdqa %ymm0, (%rax) ; X64-AVX2-NEXT: vzeroupper ; X64-AVX2-NEXT: retq %v0 = load <8 x float>, <8 x float>* %a, align 16 Index: test/CodeGen/X86/vector-blend.ll =================================================================== --- test/CodeGen/X86/vector-blend.ll +++ test/CodeGen/X86/vector-blend.ll @@ -629,7 +629,7 @@ ; ; AVX1-LABEL: constant_pblendvb_avx2: ; AVX1: # %bb.0: # %entry -; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [255,255,0,255,0,0,0,255,255,255,0,255,0,0,0,255,255,255,0,255,0,0,0,255,255,255,0,255,0,0,0,255] +; AVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [-5.4861292804117373E+303,-5.4861292804117373E+303,-5.4861292804117373E+303,-5.4861292804117373E+303] ; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0 ; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1 ; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0 Index: test/CodeGen/X86/vector-reduce-umax.ll =================================================================== --- test/CodeGen/X86/vector-reduce-umax.ll +++ test/CodeGen/X86/vector-reduce-umax.ll @@ -1141,15 +1141,14 @@ ; SSE2-NEXT: pxor %xmm2, %xmm0 ; SSE2-NEXT: pxor %xmm2, %xmm1 ; SSE2-NEXT: pmaxsw %xmm0, %xmm1 -; SSE2-NEXT: pxor %xmm2, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3] -; SSE2-NEXT: pxor %xmm2, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: pxor %xmm2, %xmm0 -; SSE2-NEXT: pmaxsw %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] ; SSE2-NEXT: pxor %xmm2, %xmm0 +; SSE2-NEXT: pmaxsw %xmm1, %xmm0 ; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: pxor %xmm2, %xmm1 ; SSE2-NEXT: psrld $16, %xmm1 -; SSE2-NEXT: pxor %xmm2, %xmm0 ; SSE2-NEXT: pxor %xmm2, %xmm1 ; SSE2-NEXT: pmaxsw %xmm0, %xmm1 ; SSE2-NEXT: pxor %xmm2, %xmm1 @@ -1207,20 +1206,19 @@ ; SSE2-NEXT: pxor %xmm2, %xmm1 ; SSE2-NEXT: pxor %xmm2, %xmm0 ; SSE2-NEXT: pmaxsw %xmm1, %xmm0 -; SSE2-NEXT: pxor %xmm2, %xmm0 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; SSE2-NEXT: pxor %xmm2, %xmm0 -; SSE2-NEXT: pxor %xmm2, %xmm1 -; SSE2-NEXT: pmaxsw %xmm0, %xmm1 +; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: pxor %xmm2, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] ; SSE2-NEXT: pxor %xmm2, %xmm1 +; SSE2-NEXT: pmaxsw %xmm0, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: pxor %xmm2, %xmm0 -; SSE2-NEXT: pmaxsw %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] ; SSE2-NEXT: pxor %xmm2, %xmm0 +; SSE2-NEXT: pmaxsw %xmm1, %xmm0 ; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: pxor %xmm2, %xmm1 ; SSE2-NEXT: psrld $16, %xmm1 -; SSE2-NEXT: pxor %xmm2, %xmm0 ; SSE2-NEXT: pxor %xmm2, %xmm1 ; SSE2-NEXT: pmaxsw %xmm0, %xmm1 ; 
SSE2-NEXT: pxor %xmm2, %xmm1 @@ -1296,35 +1294,30 @@ ; SSE2-LABEL: test_v32i16: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768] -; SSE2-NEXT: pxor %xmm4, %xmm2 -; SSE2-NEXT: pxor %xmm4, %xmm0 -; SSE2-NEXT: pmaxsw %xmm2, %xmm0 ; SSE2-NEXT: pxor %xmm4, %xmm3 ; SSE2-NEXT: pxor %xmm4, %xmm1 ; SSE2-NEXT: pmaxsw %xmm3, %xmm1 -; SSE2-NEXT: movdqa %xmm4, %xmm2 -; SSE2-NEXT: pxor %xmm4, %xmm2 -; SSE2-NEXT: pxor %xmm2, %xmm1 -; SSE2-NEXT: pxor %xmm0, %xmm2 -; SSE2-NEXT: pmaxsw %xmm1, %xmm2 -; SSE2-NEXT: pxor %xmm4, %xmm2 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1] ; SSE2-NEXT: pxor %xmm4, %xmm2 ; SSE2-NEXT: pxor %xmm4, %xmm0 ; SSE2-NEXT: pmaxsw %xmm2, %xmm0 -; SSE2-NEXT: pxor %xmm4, %xmm0 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; SSE2-NEXT: pxor %xmm4, %xmm0 +; SSE2-NEXT: pmaxsw %xmm1, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: pxor %xmm4, %xmm1 -; SSE2-NEXT: pmaxsw %xmm0, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] ; SSE2-NEXT: pxor %xmm4, %xmm1 +; SSE2-NEXT: pmaxsw %xmm0, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm0 -; SSE2-NEXT: psrld $16, %xmm0 -; SSE2-NEXT: pxor %xmm4, %xmm1 ; SSE2-NEXT: pxor %xmm4, %xmm0 -; SSE2-NEXT: pmaxsw %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] ; SSE2-NEXT: pxor %xmm4, %xmm0 -; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: pmaxsw %xmm1, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: pxor %xmm4, %xmm1 +; SSE2-NEXT: psrld $16, %xmm1 +; SSE2-NEXT: pxor %xmm4, %xmm1 +; SSE2-NEXT: pmaxsw %xmm0, %xmm1 +; SSE2-NEXT: pxor %xmm4, %xmm1 +; SSE2-NEXT: movd %xmm1, %eax ; SSE2-NEXT: # kill: def $ax killed $ax killed $eax ; SSE2-NEXT: retq ; @@ -1406,47 +1399,38 @@ ; SSE2-LABEL: test_v64i16: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [32768,32768,32768,32768,32768,32768,32768,32768] -; SSE2-NEXT: pxor %xmm8, %xmm5 -; SSE2-NEXT: pxor %xmm8, %xmm1 -; SSE2-NEXT: pmaxsw %xmm5, %xmm1 -; SSE2-NEXT: pxor %xmm8, %xmm7 -; SSE2-NEXT: pxor %xmm8, %xmm3 -; SSE2-NEXT: pmaxsw %xmm7, %xmm3 -; SSE2-NEXT: pxor %xmm8, %xmm4 -; SSE2-NEXT: pxor %xmm8, %xmm0 -; SSE2-NEXT: pmaxsw %xmm4, %xmm0 ; SSE2-NEXT: pxor %xmm8, %xmm6 ; SSE2-NEXT: pxor %xmm8, %xmm2 ; SSE2-NEXT: pmaxsw %xmm6, %xmm2 -; SSE2-NEXT: movdqa %xmm8, %xmm4 ; SSE2-NEXT: pxor %xmm8, %xmm4 -; SSE2-NEXT: pxor %xmm4, %xmm2 -; SSE2-NEXT: pxor %xmm4, %xmm0 +; SSE2-NEXT: pxor %xmm8, %xmm0 +; SSE2-NEXT: pmaxsw %xmm4, %xmm0 ; SSE2-NEXT: pmaxsw %xmm2, %xmm0 -; SSE2-NEXT: pxor %xmm4, %xmm3 -; SSE2-NEXT: pxor %xmm4, %xmm1 +; SSE2-NEXT: pxor %xmm8, %xmm7 +; SSE2-NEXT: pxor %xmm8, %xmm3 +; SSE2-NEXT: pmaxsw %xmm7, %xmm3 +; SSE2-NEXT: pxor %xmm8, %xmm5 +; SSE2-NEXT: pxor %xmm8, %xmm1 +; SSE2-NEXT: pmaxsw %xmm5, %xmm1 ; SSE2-NEXT: pmaxsw %xmm3, %xmm1 -; SSE2-NEXT: pxor %xmm4, %xmm1 -; SSE2-NEXT: pxor %xmm4, %xmm0 -; SSE2-NEXT: pmaxsw %xmm1, %xmm0 +; SSE2-NEXT: pmaxsw %xmm0, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: pxor %xmm8, %xmm0 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] ; SSE2-NEXT: pxor %xmm8, %xmm0 +; SSE2-NEXT: pmaxsw %xmm1, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: pxor %xmm8, %xmm1 -; SSE2-NEXT: pmaxsw %xmm0, %xmm1 -; SSE2-NEXT: pxor %xmm8, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] ; SSE2-NEXT: pxor %xmm8, %xmm1 +; SSE2-NEXT: pmaxsw %xmm0, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: pxor %xmm8, %xmm0 -; SSE2-NEXT: pmaxsw %xmm1, %xmm0 +; SSE2-NEXT: psrld $16, %xmm0 ; 
SSE2-NEXT: pxor %xmm8, %xmm0 -; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: psrld $16, %xmm1 +; SSE2-NEXT: pmaxsw %xmm1, %xmm0 ; SSE2-NEXT: pxor %xmm8, %xmm0 -; SSE2-NEXT: pxor %xmm8, %xmm1 -; SSE2-NEXT: pmaxsw %xmm0, %xmm1 -; SSE2-NEXT: pxor %xmm8, %xmm1 -; SSE2-NEXT: movd %xmm1, %eax +; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: # kill: def $ax killed $ax killed $eax ; SSE2-NEXT: retq ; Index: test/CodeGen/X86/vector-reduce-umin.ll =================================================================== --- test/CodeGen/X86/vector-reduce-umin.ll +++ test/CodeGen/X86/vector-reduce-umin.ll @@ -1140,15 +1140,14 @@ ; SSE2-NEXT: pxor %xmm2, %xmm0 ; SSE2-NEXT: pxor %xmm2, %xmm1 ; SSE2-NEXT: pminsw %xmm0, %xmm1 -; SSE2-NEXT: pxor %xmm2, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3] -; SSE2-NEXT: pxor %xmm2, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: pxor %xmm2, %xmm0 -; SSE2-NEXT: pminsw %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] ; SSE2-NEXT: pxor %xmm2, %xmm0 +; SSE2-NEXT: pminsw %xmm1, %xmm0 ; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: pxor %xmm2, %xmm1 ; SSE2-NEXT: psrld $16, %xmm1 -; SSE2-NEXT: pxor %xmm2, %xmm0 ; SSE2-NEXT: pxor %xmm2, %xmm1 ; SSE2-NEXT: pminsw %xmm0, %xmm1 ; SSE2-NEXT: pxor %xmm2, %xmm1 @@ -1187,20 +1186,19 @@ ; SSE2-NEXT: pxor %xmm2, %xmm1 ; SSE2-NEXT: pxor %xmm2, %xmm0 ; SSE2-NEXT: pminsw %xmm1, %xmm0 -; SSE2-NEXT: pxor %xmm2, %xmm0 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; SSE2-NEXT: pxor %xmm2, %xmm0 -; SSE2-NEXT: pxor %xmm2, %xmm1 -; SSE2-NEXT: pminsw %xmm0, %xmm1 +; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: pxor %xmm2, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] ; SSE2-NEXT: pxor %xmm2, %xmm1 +; SSE2-NEXT: pminsw %xmm0, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: pxor %xmm2, %xmm0 -; SSE2-NEXT: pminsw %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] ; SSE2-NEXT: pxor %xmm2, %xmm0 +; SSE2-NEXT: pminsw %xmm1, %xmm0 ; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: pxor %xmm2, %xmm1 ; SSE2-NEXT: psrld $16, %xmm1 -; SSE2-NEXT: pxor %xmm2, %xmm0 ; SSE2-NEXT: pxor %xmm2, %xmm1 ; SSE2-NEXT: pminsw %xmm0, %xmm1 ; SSE2-NEXT: pxor %xmm2, %xmm1 @@ -1253,35 +1251,30 @@ ; SSE2-LABEL: test_v32i16: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768] -; SSE2-NEXT: pxor %xmm4, %xmm2 -; SSE2-NEXT: pxor %xmm4, %xmm0 -; SSE2-NEXT: pminsw %xmm2, %xmm0 ; SSE2-NEXT: pxor %xmm4, %xmm3 ; SSE2-NEXT: pxor %xmm4, %xmm1 ; SSE2-NEXT: pminsw %xmm3, %xmm1 -; SSE2-NEXT: movdqa %xmm4, %xmm2 -; SSE2-NEXT: pxor %xmm4, %xmm2 -; SSE2-NEXT: pxor %xmm2, %xmm1 -; SSE2-NEXT: pxor %xmm0, %xmm2 -; SSE2-NEXT: pminsw %xmm1, %xmm2 -; SSE2-NEXT: pxor %xmm4, %xmm2 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1] ; SSE2-NEXT: pxor %xmm4, %xmm2 ; SSE2-NEXT: pxor %xmm4, %xmm0 ; SSE2-NEXT: pminsw %xmm2, %xmm0 -; SSE2-NEXT: pxor %xmm4, %xmm0 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; SSE2-NEXT: pxor %xmm4, %xmm0 +; SSE2-NEXT: pminsw %xmm1, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: pxor %xmm4, %xmm1 -; SSE2-NEXT: pminsw %xmm0, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] ; SSE2-NEXT: pxor %xmm4, %xmm1 +; SSE2-NEXT: pminsw %xmm0, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm0 -; SSE2-NEXT: psrld $16, %xmm0 -; SSE2-NEXT: pxor %xmm4, %xmm1 ; SSE2-NEXT: pxor %xmm4, %xmm0 -; SSE2-NEXT: pminsw %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] ; SSE2-NEXT: pxor %xmm4, %xmm0 -; SSE2-NEXT: movd %xmm0, 
%eax +; SSE2-NEXT: pminsw %xmm1, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: pxor %xmm4, %xmm1 +; SSE2-NEXT: psrld $16, %xmm1 +; SSE2-NEXT: pxor %xmm4, %xmm1 +; SSE2-NEXT: pminsw %xmm0, %xmm1 +; SSE2-NEXT: pxor %xmm4, %xmm1 +; SSE2-NEXT: movd %xmm1, %eax ; SSE2-NEXT: # kill: def $ax killed $ax killed $eax ; SSE2-NEXT: retq ; @@ -1338,47 +1331,38 @@ ; SSE2-LABEL: test_v64i16: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [32768,32768,32768,32768,32768,32768,32768,32768] -; SSE2-NEXT: pxor %xmm8, %xmm5 -; SSE2-NEXT: pxor %xmm8, %xmm1 -; SSE2-NEXT: pminsw %xmm5, %xmm1 -; SSE2-NEXT: pxor %xmm8, %xmm7 -; SSE2-NEXT: pxor %xmm8, %xmm3 -; SSE2-NEXT: pminsw %xmm7, %xmm3 -; SSE2-NEXT: pxor %xmm8, %xmm4 -; SSE2-NEXT: pxor %xmm8, %xmm0 -; SSE2-NEXT: pminsw %xmm4, %xmm0 ; SSE2-NEXT: pxor %xmm8, %xmm6 ; SSE2-NEXT: pxor %xmm8, %xmm2 ; SSE2-NEXT: pminsw %xmm6, %xmm2 -; SSE2-NEXT: movdqa %xmm8, %xmm4 ; SSE2-NEXT: pxor %xmm8, %xmm4 -; SSE2-NEXT: pxor %xmm4, %xmm2 -; SSE2-NEXT: pxor %xmm4, %xmm0 +; SSE2-NEXT: pxor %xmm8, %xmm0 +; SSE2-NEXT: pminsw %xmm4, %xmm0 ; SSE2-NEXT: pminsw %xmm2, %xmm0 -; SSE2-NEXT: pxor %xmm4, %xmm3 -; SSE2-NEXT: pxor %xmm4, %xmm1 +; SSE2-NEXT: pxor %xmm8, %xmm7 +; SSE2-NEXT: pxor %xmm8, %xmm3 +; SSE2-NEXT: pminsw %xmm7, %xmm3 +; SSE2-NEXT: pxor %xmm8, %xmm5 +; SSE2-NEXT: pxor %xmm8, %xmm1 +; SSE2-NEXT: pminsw %xmm5, %xmm1 ; SSE2-NEXT: pminsw %xmm3, %xmm1 -; SSE2-NEXT: pxor %xmm4, %xmm1 -; SSE2-NEXT: pxor %xmm4, %xmm0 -; SSE2-NEXT: pminsw %xmm1, %xmm0 +; SSE2-NEXT: pminsw %xmm0, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: pxor %xmm8, %xmm0 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] ; SSE2-NEXT: pxor %xmm8, %xmm0 +; SSE2-NEXT: pminsw %xmm1, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm1 ; SSE2-NEXT: pxor %xmm8, %xmm1 -; SSE2-NEXT: pminsw %xmm0, %xmm1 -; SSE2-NEXT: pxor %xmm8, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] ; SSE2-NEXT: pxor %xmm8, %xmm1 +; SSE2-NEXT: pminsw %xmm0, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: pxor %xmm8, %xmm0 -; SSE2-NEXT: pminsw %xmm1, %xmm0 +; SSE2-NEXT: psrld $16, %xmm0 ; SSE2-NEXT: pxor %xmm8, %xmm0 -; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: psrld $16, %xmm1 +; SSE2-NEXT: pminsw %xmm1, %xmm0 ; SSE2-NEXT: pxor %xmm8, %xmm0 -; SSE2-NEXT: pxor %xmm8, %xmm1 -; SSE2-NEXT: pminsw %xmm0, %xmm1 -; SSE2-NEXT: pxor %xmm8, %xmm1 -; SSE2-NEXT: movd %xmm1, %eax +; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: # kill: def $ax killed $ax killed $eax ; SSE2-NEXT: retq ; Index: test/CodeGen/X86/vector-shift-lshr-128.ll =================================================================== --- test/CodeGen/X86/vector-shift-lshr-128.ll +++ test/CodeGen/X86/vector-shift-lshr-128.ll @@ -373,8 +373,8 @@ ; SSE2-NEXT: movdqa %xmm3, %xmm4 ; SSE2-NEXT: pandn %xmm0, %xmm4 ; SSE2-NEXT: psrlw $4, %xmm0 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: por %xmm4, %xmm0 ; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: pxor %xmm3, %xmm3 @@ -382,16 +382,16 @@ ; SSE2-NEXT: movdqa %xmm3, %xmm4 ; SSE2-NEXT: pandn %xmm0, %xmm4 ; SSE2-NEXT: psrlw $2, %xmm0 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: por %xmm4, %xmm0 ; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: pcmpgtb %xmm1, %xmm2 ; SSE2-NEXT: movdqa %xmm2, %xmm1 ; SSE2-NEXT: pandn %xmm0, %xmm1 ; SSE2-NEXT: psrlw $1, %xmm0 -; SSE2-NEXT: pand 
{{.*}}(%rip), %xmm0 ; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: por %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -488,8 +488,8 @@ ; X32-SSE-NEXT: movdqa %xmm3, %xmm4 ; X32-SSE-NEXT: pandn %xmm0, %xmm4 ; X32-SSE-NEXT: psrlw $4, %xmm0 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X32-SSE-NEXT: pand %xmm3, %xmm0 +; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X32-SSE-NEXT: por %xmm4, %xmm0 ; X32-SSE-NEXT: paddb %xmm1, %xmm1 ; X32-SSE-NEXT: pxor %xmm3, %xmm3 @@ -497,16 +497,16 @@ ; X32-SSE-NEXT: movdqa %xmm3, %xmm4 ; X32-SSE-NEXT: pandn %xmm0, %xmm4 ; X32-SSE-NEXT: psrlw $2, %xmm0 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X32-SSE-NEXT: pand %xmm3, %xmm0 +; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X32-SSE-NEXT: por %xmm4, %xmm0 ; X32-SSE-NEXT: paddb %xmm1, %xmm1 ; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm2 ; X32-SSE-NEXT: movdqa %xmm2, %xmm1 ; X32-SSE-NEXT: pandn %xmm0, %xmm1 ; X32-SSE-NEXT: psrlw $1, %xmm0 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X32-SSE-NEXT: pand %xmm2, %xmm0 +; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X32-SSE-NEXT: por %xmm1, %xmm0 ; X32-SSE-NEXT: retl %shift = lshr <16 x i8> %a, %b Index: test/CodeGen/X86/vector-shift-shl-128.ll =================================================================== --- test/CodeGen/X86/vector-shift-shl-128.ll +++ test/CodeGen/X86/vector-shift-shl-128.ll @@ -295,8 +295,8 @@ ; SSE2-NEXT: movdqa %xmm3, %xmm4 ; SSE2-NEXT: pandn %xmm0, %xmm4 ; SSE2-NEXT: psllw $4, %xmm0 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: por %xmm4, %xmm0 ; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: pxor %xmm3, %xmm3 @@ -304,8 +304,8 @@ ; SSE2-NEXT: movdqa %xmm3, %xmm4 ; SSE2-NEXT: pandn %xmm0, %xmm4 ; SSE2-NEXT: psllw $2, %xmm0 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: por %xmm4, %xmm0 ; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: pcmpgtb %xmm1, %xmm2 @@ -405,8 +405,8 @@ ; X32-SSE-NEXT: movdqa %xmm3, %xmm4 ; X32-SSE-NEXT: pandn %xmm0, %xmm4 ; X32-SSE-NEXT: psllw $4, %xmm0 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X32-SSE-NEXT: pand %xmm3, %xmm0 +; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X32-SSE-NEXT: por %xmm4, %xmm0 ; X32-SSE-NEXT: paddb %xmm1, %xmm1 ; X32-SSE-NEXT: pxor %xmm3, %xmm3 @@ -414,8 +414,8 @@ ; X32-SSE-NEXT: movdqa %xmm3, %xmm4 ; X32-SSE-NEXT: pandn %xmm0, %xmm4 ; X32-SSE-NEXT: psllw $2, %xmm0 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X32-SSE-NEXT: pand %xmm3, %xmm0 +; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X32-SSE-NEXT: por %xmm4, %xmm0 ; X32-SSE-NEXT: paddb %xmm1, %xmm1 ; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm2 Index: test/CodeGen/X86/vector-shuffle-256-v16.ll =================================================================== --- test/CodeGen/X86/vector-shuffle-256-v16.ll +++ test/CodeGen/X86/vector-shuffle-256-v16.ll @@ -846,7 +846,7 @@ define <16 x i16> @shuffle_v16i16_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31(<16 x i16> %a, <16 x i16> %b) { ; AVX1-LABEL: shuffle_v16i16_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [65535,0,65535,0,65535,0,65535,0,65535,0,65535,0,65535,0,65535,0] +; AVX1-NEXT: vbroadcastss {{.*#+}} ymm2 = [9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41] ; AVX1-NEXT: vandnps %ymm1, %ymm2, %ymm1 ; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0 @@ -863,7 +863,7 @@ define <16 x i16> 
@shuffle_v16i16_16_01_18_03_20_05_22_07_24_09_26_11_28_13_30_15(<16 x i16> %a, <16 x i16> %b) { ; AVX1-LABEL: shuffle_v16i16_16_01_18_03_20_05_22_07_24_09_26_11_28_13_30_15: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [65535,0,65535,0,65535,0,65535,0,65535,0,65535,0,65535,0,65535,0] +; AVX1-NEXT: vbroadcastss {{.*#+}} ymm2 = [9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41] ; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0 ; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1 ; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0 Index: test/CodeGen/X86/vector-trunc-math.ll =================================================================== --- test/CodeGen/X86/vector-trunc-math.ll +++ test/CodeGen/X86/vector-trunc-math.ll @@ -233,7 +233,7 @@ ; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm7 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3 ; AVX1-NEXT: vpaddq %xmm7, %xmm3, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] +; AVX1-NEXT: vmovddup {{.*#+}} xmm7 = mem[0,0] ; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 ; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6 ; AVX1-NEXT: vpackusdw %xmm3, %xmm6, %xmm3 @@ -347,7 +347,7 @@ ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 ; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43] ; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1 @@ -680,22 +680,22 @@ ; AVX1-LABEL: trunc_add_const_v16i64_v16i8: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] -; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = mem[0,0] +; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -781,13 +781,13 @@ ; AVX1-LABEL: trunc_add_const_v16i32_v16i8: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] -; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43] +; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 -; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 -; 
AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0 @@ -1106,7 +1106,7 @@ ; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm7 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3 ; AVX1-NEXT: vpsubq %xmm7, %xmm3, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] +; AVX1-NEXT: vmovddup {{.*#+}} xmm7 = mem[0,0] ; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 ; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6 ; AVX1-NEXT: vpackusdw %xmm3, %xmm6, %xmm3 @@ -1220,7 +1220,7 @@ ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 ; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43] ; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1 @@ -1575,7 +1575,7 @@ ; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm3, %xmm7 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3 ; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm3, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] +; AVX1-NEXT: vmovddup {{.*#+}} xmm4 = mem[0,0] ; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vpand %xmm4, %xmm7, %xmm7 ; AVX1-NEXT: vpackusdw %xmm3, %xmm7, %xmm3 @@ -1687,7 +1687,7 @@ ; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm1, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 ; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm1, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43] ; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1 ; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm1, %xmm3, %xmm1 @@ -2275,7 +2275,7 @@ ; AVX1-NEXT: vpsllq $32, %xmm6, %xmm6 ; AVX1-NEXT: vpmuludq %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vpaddq %xmm6, %xmm3, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] +; AVX1-NEXT: vmovddup {{.*#+}} xmm4 = mem[0,0] ; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 @@ -2451,7 +2451,7 @@ ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 ; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43] ; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1 @@ -2909,7 +2909,7 @@ ; AVX1-NEXT: vpmuludq %xmm6, %xmm3, %xmm3 ; AVX1-NEXT: vpsllq $32, %xmm3, %xmm3 ; AVX1-NEXT: vpaddq %xmm3, %xmm7, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] +; AVX1-NEXT: vmovddup {{.*#+}} xmm6 = mem[0,0] ; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 ; AVX1-NEXT: vpand %xmm6, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 @@ -3049,7 +3049,7 @@ ; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 ; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43] ; 
AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1 ; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm1, %xmm3, %xmm1 @@ -3351,27 +3351,27 @@ ; ; AVX1-LABEL: trunc_and_v16i64_v16i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vandps %ymm4, %ymm0, %ymm0 -; AVX1-NEXT: vandps %ymm5, %ymm1, %ymm1 -; AVX1-NEXT: vandps %ymm6, %ymm2, %ymm2 -; AVX1-NEXT: vandps %ymm7, %ymm3, %ymm3 +; AVX1-NEXT: vandpd %ymm4, %ymm0, %ymm0 +; AVX1-NEXT: vandpd %ymm5, %ymm1, %ymm1 +; AVX1-NEXT: vandpd %ymm6, %ymm2, %ymm2 +; AVX1-NEXT: vandpd %ymm7, %ymm3, %ymm3 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 -; AVX1-NEXT: vmovaps {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] -; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = mem[0,0] +; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -3468,7 +3468,7 @@ ; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm1 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43] ; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1 @@ -3751,22 +3751,22 @@ ; AVX1-LABEL: trunc_and_const_v16i64_v16i8: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] -; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = mem[0,0] +; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, 
%xmm0, %xmm0 @@ -3852,13 +3852,13 @@ ; AVX1-LABEL: trunc_and_const_v16i32_v16i8: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] -; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43] +; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 -; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 @@ -4153,27 +4153,27 @@ ; ; AVX1-LABEL: trunc_xor_v16i64_v16i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vxorps %ymm4, %ymm0, %ymm0 -; AVX1-NEXT: vxorps %ymm5, %ymm1, %ymm1 -; AVX1-NEXT: vxorps %ymm6, %ymm2, %ymm2 -; AVX1-NEXT: vxorps %ymm7, %ymm3, %ymm3 +; AVX1-NEXT: vxorpd %ymm4, %ymm0, %ymm0 +; AVX1-NEXT: vxorpd %ymm5, %ymm1, %ymm1 +; AVX1-NEXT: vxorpd %ymm6, %ymm2, %ymm2 +; AVX1-NEXT: vxorpd %ymm7, %ymm3, %ymm3 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 -; AVX1-NEXT: vmovaps {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] -; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = mem[0,0] +; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -4270,7 +4270,7 @@ ; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43] ; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1 @@ -4553,22 +4553,22 @@ ; AVX1-LABEL: trunc_xor_const_v16i64_v16i8: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] -; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = mem[0,0] +; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vpand %xmm5, 
%xmm2, %xmm2 +; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -4654,13 +4654,13 @@ ; AVX1-LABEL: trunc_xor_const_v16i32_v16i8: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] -; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43] +; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 -; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0 @@ -4955,27 +4955,27 @@ ; ; AVX1-LABEL: trunc_or_v16i64_v16i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vorps %ymm4, %ymm0, %ymm0 -; AVX1-NEXT: vorps %ymm5, %ymm1, %ymm1 -; AVX1-NEXT: vorps %ymm6, %ymm2, %ymm2 -; AVX1-NEXT: vorps %ymm7, %ymm3, %ymm3 +; AVX1-NEXT: vorpd %ymm4, %ymm0, %ymm0 +; AVX1-NEXT: vorpd %ymm5, %ymm1, %ymm1 +; AVX1-NEXT: vorpd %ymm6, %ymm2, %ymm2 +; AVX1-NEXT: vorpd %ymm7, %ymm3, %ymm3 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 -; AVX1-NEXT: vmovaps {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0] -; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = mem[0,0] +; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 -; AVX1-NEXT: vandps %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vandps %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 -; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandps %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; AVX1-NEXT: vandps %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vandps %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -5072,7 +5072,7 @@ ; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm1 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = 
[3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
 ; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
@@ -5355,22 +5355,22 @@
 ; AVX1-LABEL: trunc_or_const_v16i64_v16i8:
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
-; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = mem[0,0]
+; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
 ; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
 ; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
 ; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
 ; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
@@ -5456,13 +5456,13 @@
 ; AVX1-LABEL: trunc_or_const_v16i32_v16i8:
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
Index: test/CodeGen/X86/vector-trunc-packus.ll
===================================================================
--- test/CodeGen/X86/vector-trunc-packus.ll
+++ test/CodeGen/X86/vector-trunc-packus.ll
@@ -1087,25 +1087,27 @@
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
 ; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: pandn %xmm2, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT: pxor %xmm4, %xmm3
+; SSE2-NEXT: psrld $16, %xmm3
 ; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm1
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: pslld $16, %xmm1
-; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm4, %xmm2
+; SSE2-NEXT: psrld $16, %xmm2
+; SSE2-NEXT: por %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pslld $16, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
 ; SSE2-NEXT: pslld $16, %xmm0
 ; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: packssdw %xmm1, %xmm0
+; SSE2-NEXT: packssdw %xmm2, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: trunc_packus_v8i32_v8i16:
@@ -1114,24 +1116,26 @@
 ; SSSE3-NEXT: movdqa %xmm2, %xmm3
 ; SSSE3-NEXT: pcmpgtd %xmm1, %xmm3
 ; SSSE3-NEXT: pand %xmm3, %xmm1
-; SSSE3-NEXT: pandn %xmm2, %xmm3
+; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4
+; SSSE3-NEXT: pxor %xmm4, %xmm3
+; SSSE3-NEXT: psrld $16, %xmm3
 ; SSSE3-NEXT: por %xmm1, %xmm3
-; SSSE3-NEXT: movdqa %xmm2, %xmm1
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm1
-; SSSE3-NEXT: pand %xmm1, %xmm0
-; SSSE3-NEXT: pandn %xmm2, %xmm1
-; SSSE3-NEXT: por %xmm0, %xmm1
-; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSSE3-NEXT: pcmpgtd %xmm2, %xmm0
-; SSSE3-NEXT: pand %xmm1, %xmm0
-; SSSE3-NEXT: movdqa %xmm3, %xmm1
-; SSSE3-NEXT: pcmpgtd %xmm2, %xmm1
-; SSSE3-NEXT: pand %xmm3, %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSSE3-NEXT: pshufb %xmm2, %xmm1
-; SSSE3-NEXT: pshufb %xmm2, %xmm0
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm0
+; SSSE3-NEXT: pxor %xmm4, %xmm2
+; SSSE3-NEXT: psrld $16, %xmm2
+; SSSE3-NEXT: por %xmm0, %xmm2
+; SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSSE3-NEXT: movdqa %xmm2, %xmm0
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
+; SSSE3-NEXT: pand %xmm2, %xmm0
+; SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
+; SSSE3-NEXT: pand %xmm3, %xmm2
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT: pshufb %xmm1, %xmm2
+; SSSE3-NEXT: pshufb %xmm1, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: trunc_packus_v8i32_v8i16:
@@ -1201,37 +1205,41 @@
 define <16 x i16> @trunc_packus_v16i32_v16i16(<16 x i32> %a0) {
 ; SSE2-LABEL: trunc_packus_v16i32_v16i16:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535]
-; SSE2-NEXT: movdqa %xmm6, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535]
+; SSE2-NEXT: movdqa %xmm5, %xmm4
 ; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
 ; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: pandn %xmm6, %xmm4
+; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
+; SSE2-NEXT: pxor %xmm7, %xmm4
+; SSE2-NEXT: psrld $16, %xmm4
 ; SSE2-NEXT: por %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm6, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm5, %xmm0
-; SSE2-NEXT: pandn %xmm6, %xmm5
-; SSE2-NEXT: por %xmm0, %xmm5
-; SSE2-NEXT: movdqa %xmm6, %xmm0
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm6
+; SSE2-NEXT: pand %xmm6, %xmm0
+; SSE2-NEXT: pxor %xmm7, %xmm6
+; SSE2-NEXT: psrld $16, %xmm6
+; SSE2-NEXT: por %xmm0, %xmm6
+; SSE2-NEXT: movdqa %xmm5, %xmm0
 ; SSE2-NEXT: pcmpgtd %xmm3, %xmm0
 ; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: pandn %xmm6, %xmm0
+; SSE2-NEXT: pxor %xmm7, %xmm0
+; SSE2-NEXT: psrld $16, %xmm0
 ; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: movdqa %xmm6, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: pandn %xmm6, %xmm3
-; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm5
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pxor %xmm7, %xmm5
+; SSE2-NEXT: psrld $16, %xmm5
+; SSE2-NEXT: por %xmm2, %xmm5
 ; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm5, %xmm1
 ; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pand %xmm5, %xmm1
 ; SSE2-NEXT: movdqa %xmm0, %xmm3
 ; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
 ; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: movdqa %xmm5, %xmm0
+; SSE2-NEXT: movdqa %xmm6, %xmm0
 ; SSE2-NEXT: pcmpgtd %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: pand %xmm6, %xmm0
 ; SSE2-NEXT: movdqa %xmm4, %xmm5
 ; SSE2-NEXT: pcmpgtd %xmm2, %xmm5
 ; SSE2-NEXT: pand %xmm4, %xmm5
@@ -1249,37 +1257,41 @@
 ;
 ; SSSE3-LABEL: trunc_packus_v16i32_v16i16:
 ; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535]
-; SSSE3-NEXT: movdqa %xmm6, %xmm4
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535]
+; SSSE3-NEXT: movdqa %xmm5, %xmm4
 ; SSSE3-NEXT: pcmpgtd %xmm1, %xmm4
 ; SSSE3-NEXT: pand %xmm4, %xmm1
-; SSSE3-NEXT: pandn %xmm6, %xmm4
+; SSSE3-NEXT: pcmpeqd %xmm7, %xmm7
+; SSSE3-NEXT: pxor %xmm7, %xmm4
+; SSSE3-NEXT: psrld $16, %xmm4
 ; SSSE3-NEXT: por %xmm1, %xmm4
-; SSSE3-NEXT: movdqa %xmm6, %xmm5
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm5
-; SSSE3-NEXT: pand %xmm5, %xmm0
-; SSSE3-NEXT: pandn %xmm6, %xmm5
-; SSSE3-NEXT: por %xmm0, %xmm5
-; SSSE3-NEXT: movdqa %xmm6, %xmm0
+; SSSE3-NEXT: movdqa %xmm5, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm6
+; SSSE3-NEXT: pand %xmm6, %xmm0
+; SSSE3-NEXT: pxor %xmm7, %xmm6
+; SSSE3-NEXT: psrld $16, %xmm6
+; SSSE3-NEXT: por %xmm0, %xmm6
+; SSSE3-NEXT: movdqa %xmm5, %xmm0
 ; SSSE3-NEXT: pcmpgtd %xmm3, %xmm0
 ; SSSE3-NEXT: pand %xmm0, %xmm3
-; SSSE3-NEXT: pandn %xmm6, %xmm0
+; SSSE3-NEXT: pxor %xmm7, %xmm0
+; SSSE3-NEXT: psrld $16, %xmm0
 ; SSSE3-NEXT: por %xmm3, %xmm0
-; SSSE3-NEXT: movdqa %xmm6, %xmm3
-; SSSE3-NEXT: pcmpgtd %xmm2, %xmm3
-; SSSE3-NEXT: pand %xmm3, %xmm2
-; SSSE3-NEXT: pandn %xmm6, %xmm3
-; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm2
+; SSSE3-NEXT: pxor %xmm7, %xmm5
+; SSSE3-NEXT: psrld $16, %xmm5
+; SSSE3-NEXT: por %xmm2, %xmm5
 ; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSSE3-NEXT: movdqa %xmm5, %xmm1
 ; SSSE3-NEXT: pcmpgtd %xmm2, %xmm1
-; SSSE3-NEXT: pand %xmm3, %xmm1
+; SSSE3-NEXT: pand %xmm5, %xmm1
 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
 ; SSSE3-NEXT: pcmpgtd %xmm2, %xmm3
 ; SSSE3-NEXT: pand %xmm0, %xmm3
-; SSSE3-NEXT: movdqa %xmm5, %xmm0
+; SSSE3-NEXT: movdqa %xmm6, %xmm0
 ; SSSE3-NEXT: pcmpgtd %xmm2, %xmm0
-; SSSE3-NEXT: pand %xmm5, %xmm0
+; SSSE3-NEXT: pand %xmm6, %xmm0
 ; SSSE3-NEXT: movdqa %xmm4, %xmm5
 ; SSSE3-NEXT: pcmpgtd %xmm2, %xmm5
 ; SSSE3-NEXT: pand %xmm4, %xmm5
@@ -2070,24 +2082,25 @@
 ; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm5
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
 ; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
 ; AVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm8
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm5, %xmm6
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm1, %xmm7
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm1, %xmm7, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm1, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm7, %xmm2
+; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX1-NEXT: vpand %xmm3, %xmm7, %xmm7
+; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm1, %xmm6, %xmm1
 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm2
-; AVX1-NEXT: vpand %xmm2, %xmm6, %xmm2
-; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm2
+; AVX1-NEXT: vpand %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT: vpand %xmm0, %xmm8, %xmm0
 ; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
@@ -2872,21 +2885,23 @@
 ; SSE2-NEXT: movdqa %xmm2, %xmm3
 ; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
 ; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT: pxor %xmm4, %xmm3
+; SSE2-NEXT: psrld $24, %xmm3
 ; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: pandn %xmm2, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
 ; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm2
+; SSE2-NEXT: psrld $24, %xmm2
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
+; SSE2-NEXT: pand %xmm2, %xmm4
 ; SSE2-NEXT: movdqa %xmm3, %xmm0
 ; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
 ; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm4, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: trunc_packus_v8i32_v8i8:
@@ -2895,21 +2910,23 @@
 ; SSSE3-NEXT: movdqa %xmm2, %xmm3
 ; SSSE3-NEXT: pcmpgtd %xmm0, %xmm3
 ; SSSE3-NEXT: pand %xmm3, %xmm0
-; SSSE3-NEXT: pandn %xmm2, %xmm3
+; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4
+; SSSE3-NEXT: pxor %xmm4, %xmm3
+; SSSE3-NEXT: psrld $24, %xmm3
 ; SSSE3-NEXT: por %xmm0, %xmm3
-; SSSE3-NEXT: movdqa %xmm2, %xmm0
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
-; SSSE3-NEXT: pand %xmm0, %xmm1
-; SSSE3-NEXT: pandn %xmm2, %xmm0
-; SSSE3-NEXT: por %xmm1, %xmm0
-; SSSE3-NEXT: pxor %xmm1, %xmm1
-; SSSE3-NEXT: movdqa %xmm0, %xmm2
 ; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
-; SSSE3-NEXT: pand %xmm0, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm2
+; SSSE3-NEXT: psrld $24, %xmm2
+; SSSE3-NEXT: por %xmm1, %xmm2
+; SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSSE3-NEXT: movdqa %xmm2, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm4
+; SSSE3-NEXT: pand %xmm2, %xmm4
 ; SSSE3-NEXT: movdqa %xmm3, %xmm0
 ; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
 ; SSSE3-NEXT: pand %xmm3, %xmm0
-; SSSE3-NEXT: packuswb %xmm2, %xmm0
+; SSSE3-NEXT: packuswb %xmm4, %xmm0
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: trunc_packus_v8i32_v8i8:
Index: test/CodeGen/X86/vector-trunc-ssat.ll
===================================================================
--- test/CodeGen/X86/vector-trunc-ssat.ll
+++ test/CodeGen/X86/vector-trunc-ssat.ll
@@ -2001,7 +2001,7 @@
 ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
 ; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovapd {{.*#+}} xmm3 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
 ; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT: vandpd %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
@@ -2824,56 +2824,60 @@
 define <8 x i8> @trunc_ssat_v8i32_v8i8(<8 x i32> %a0) {
 ; SSE2-LABEL: trunc_ssat_v8i32_v8i8:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127]
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm3
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: pandn %xmm2, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [4294967168,4294967168,4294967168,4294967168]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [127,127,127,127]
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
 ; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT: pxor %xmm4, %xmm2
+; SSE2-NEXT: psrld $25, %xmm2
 ; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm3
+; SSE2-NEXT: psrld $25, %xmm3
+; SSE2-NEXT: por %xmm1, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [4294967168,4294967168,4294967168,4294967168]
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pandn %xmm1, %xmm4
+; SSE2-NEXT: por %xmm3, %xmm4
+; SSE2-NEXT: movdqa %xmm2, %xmm0
 ; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm3
+; SSE2-NEXT: pand %xmm0, %xmm2
 ; SSE2-NEXT: pandn %xmm1, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: packssdw %xmm2, %xmm0
+; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: packssdw %xmm4, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: trunc_ssat_v8i32_v8i8:
 ; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127]
-; SSSE3-NEXT: movdqa %xmm2, %xmm3
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm3
-; SSSE3-NEXT: pand %xmm3, %xmm0
-; SSSE3-NEXT: pandn %xmm2, %xmm3
-; SSSE3-NEXT: por %xmm0, %xmm3
-; SSSE3-NEXT: movdqa %xmm2, %xmm0
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
-; SSSE3-NEXT: pand %xmm0, %xmm1
-; SSSE3-NEXT: pandn %xmm2, %xmm0
-; SSSE3-NEXT: por %xmm1, %xmm0
-; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [4294967168,4294967168,4294967168,4294967168]
-; SSSE3-NEXT: movdqa %xmm0, %xmm2
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [127,127,127,127]
+; SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm2
 ; SSSE3-NEXT: pand %xmm2, %xmm0
-; SSSE3-NEXT: pandn %xmm1, %xmm2
+; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4
+; SSSE3-NEXT: pxor %xmm4, %xmm2
+; SSSE3-NEXT: psrld $25, %xmm2
 ; SSSE3-NEXT: por %xmm0, %xmm2
-; SSSE3-NEXT: movdqa %xmm3, %xmm0
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm3
+; SSSE3-NEXT: psrld $25, %xmm3
+; SSSE3-NEXT: por %xmm1, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [4294967168,4294967168,4294967168,4294967168]
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm3
+; SSSE3-NEXT: pandn %xmm1, %xmm4
+; SSSE3-NEXT: por %xmm3, %xmm4
+; SSSE3-NEXT: movdqa %xmm2, %xmm0
 ; SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
-; SSSE3-NEXT: pand %xmm0, %xmm3
+; SSSE3-NEXT: pand %xmm0, %xmm2
 ; SSSE3-NEXT: pandn %xmm1, %xmm0
-; SSSE3-NEXT: por %xmm3, %xmm0
-; SSSE3-NEXT: packssdw %xmm2, %xmm0
+; SSSE3-NEXT: por %xmm2, %xmm0
+; SSSE3-NEXT: packssdw %xmm4, %xmm0
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: trunc_ssat_v8i32_v8i8:
Index: test/CodeGen/X86/vector-trunc-usat.ll
===================================================================
--- test/CodeGen/X86/vector-trunc-usat.ll
+++ test/CodeGen/X86/vector-trunc-usat.ll
@@ -716,49 +716,53 @@
 define <8 x i16> @trunc_usat_v8i32_v8i16(<8 x i32> %a0) {
 ; SSE2-LABEL: trunc_usat_v8i32_v8i16:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
 ; SSE2-NEXT: movdqa %xmm0, %xmm4
 ; SSE2-NEXT: pxor %xmm3, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147549183,2147549183,2147549183,2147549183]
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm6, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm6
-; SSE2-NEXT: por %xmm6, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147549183,2147549183,2147549183,2147549183]
+; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT: pxor %xmm4, %xmm5
+; SSE2-NEXT: psrld $16, %xmm5
+; SSE2-NEXT: por %xmm5, %xmm0
 ; SSE2-NEXT: pxor %xmm1, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: pandn %xmm2, %xmm5
-; SSE2-NEXT: por %xmm1, %xmm5
-; SSE2-NEXT: pslld $16, %xmm5
-; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm2
+; SSE2-NEXT: psrld $16, %xmm2
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: pslld $16, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
 ; SSE2-NEXT: pslld $16, %xmm0
 ; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: packssdw %xmm5, %xmm0
+; SSE2-NEXT: packssdw %xmm2, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: trunc_usat_v8i32_v8i16:
 ; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
-; SSSE3-NEXT: movdqa %xmm0, %xmm4
-; SSSE3-NEXT: pxor %xmm3, %xmm4
-; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [2147549183,2147549183,2147549183,2147549183]
-; SSSE3-NEXT: movdqa %xmm5, %xmm6
-; SSSE3-NEXT: pcmpgtd %xmm4, %xmm6
-; SSSE3-NEXT: pand %xmm6, %xmm0
-; SSSE3-NEXT: pandn %xmm2, %xmm6
-; SSSE3-NEXT: por %xmm6, %xmm0
-; SSSE3-NEXT: pxor %xmm1, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
+; SSSE3-NEXT: pxor %xmm2, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147549183,2147549183,2147549183,2147549183]
+; SSSE3-NEXT: movdqa %xmm4, %xmm5
 ; SSSE3-NEXT: pcmpgtd %xmm3, %xmm5
-; SSSE3-NEXT: pand %xmm5, %xmm1
-; SSSE3-NEXT: pandn %xmm2, %xmm5
-; SSSE3-NEXT: por %xmm1, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm0
+; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3
+; SSSE3-NEXT: pxor %xmm3, %xmm5
+; SSSE3-NEXT: psrld $16, %xmm5
+; SSSE3-NEXT: por %xmm5, %xmm0
+; SSSE3-NEXT: pxor %xmm1, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pxor %xmm3, %xmm4
+; SSSE3-NEXT: psrld $16, %xmm4
+; SSSE3-NEXT: por %xmm1, %xmm4
 ; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSSE3-NEXT: pshufb %xmm1, %xmm5
+; SSSE3-NEXT: pshufb %xmm1, %xmm4
 ; SSSE3-NEXT: pshufb %xmm1, %xmm0
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: trunc_usat_v8i32_v8i16:
@@ -827,34 +831,38 @@
 ; SSE2-LABEL: trunc_usat_v16i32_v16i16:
 ; SSE2: # %bb.0:
 ; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,65535,65535]
-; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: pxor %xmm7, %xmm6
+; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: pxor %xmm6, %xmm7
 ; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147549183,2147549183,2147549183,2147549183]
 ; SSE2-NEXT: movdqa %xmm5, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm6, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm1
 ; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm8, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm8
+; SSE2-NEXT: pxor %xmm8, %xmm1
+; SSE2-NEXT: psrld $16, %xmm1
 ; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: pxor %xmm7, %xmm6
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: pxor %xmm6, %xmm7
 ; SSE2-NEXT: movdqa %xmm5, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm6, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm2
 ; SSE2-NEXT: pand %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm8, %xmm2
+; SSE2-NEXT: pxor %xmm8, %xmm2
+; SSE2-NEXT: psrld $16, %xmm2
 ; SSE2-NEXT: por %xmm3, %xmm2
 ; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm7, %xmm3
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm6
-; SSE2-NEXT: pand %xmm6, %xmm0
-; SSE2-NEXT: pandn %xmm8, %xmm6
-; SSE2-NEXT: por %xmm6, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm7
-; SSE2-NEXT: pcmpgtd %xmm7, %xmm5
+; SSE2-NEXT: pxor %xmm6, %xmm3
+; SSE2-NEXT: movdqa %xmm5, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm7
+; SSE2-NEXT: pand %xmm7, %xmm0
+; SSE2-NEXT: pxor %xmm8, %xmm7
+; SSE2-NEXT: psrld $16, %xmm7
+; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: pxor %xmm4, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm5
 ; SSE2-NEXT: pand %xmm5, %xmm4
-; SSE2-NEXT: pandn %xmm8, %xmm5
+; SSE2-NEXT: pxor %xmm8, %xmm5
+; SSE2-NEXT: psrld $16, %xmm5
 ; SSE2-NEXT: por %xmm4, %xmm5
 ; SSE2-NEXT: pslld $16, %xmm5
 ; SSE2-NEXT: psrad $16, %xmm5
@@ -871,34 +879,38 @@
 ; SSSE3-LABEL: trunc_usat_v16i32_v16i16:
 ; SSSE3: # %bb.0:
 ; SSSE3-NEXT: movdqa %xmm1, %xmm4
-; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,65535,65535]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,2147483648,2147483648,2147483648]
-; SSSE3-NEXT: movdqa %xmm2, %xmm6
-; SSSE3-NEXT: pxor %xmm7, %xmm6
+; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm2, %xmm7
+; SSSE3-NEXT: pxor %xmm6, %xmm7
 ; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [2147549183,2147549183,2147549183,2147549183]
 ; SSSE3-NEXT: movdqa %xmm5, %xmm1
-; SSSE3-NEXT: pcmpgtd %xmm6, %xmm1
+; SSSE3-NEXT: pcmpgtd %xmm7, %xmm1
 ; SSSE3-NEXT: pand %xmm1, %xmm2
-; SSSE3-NEXT: pandn %xmm8, %xmm1
+; SSSE3-NEXT: pcmpeqd %xmm8, %xmm8
+; SSSE3-NEXT: pxor %xmm8, %xmm1
+; SSSE3-NEXT: psrld $16, %xmm1
 ; SSSE3-NEXT: por %xmm2, %xmm1
-; SSSE3-NEXT: movdqa %xmm3, %xmm6
-; SSSE3-NEXT: pxor %xmm7, %xmm6
+; SSSE3-NEXT: movdqa %xmm3, %xmm7
+; SSSE3-NEXT: pxor %xmm6, %xmm7
 ; SSSE3-NEXT: movdqa %xmm5, %xmm2
-; SSSE3-NEXT: pcmpgtd %xmm6, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm7, %xmm2
 ; SSSE3-NEXT: pand %xmm2, %xmm3
-; SSSE3-NEXT: pandn %xmm8, %xmm2
+; SSSE3-NEXT: pxor %xmm8, %xmm2
+; SSSE3-NEXT: psrld $16, %xmm2
 ; SSSE3-NEXT: por %xmm3, %xmm2
 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
-; SSSE3-NEXT: pxor %xmm7, %xmm3
-; SSSE3-NEXT: movdqa %xmm5, %xmm6
-; SSSE3-NEXT: pcmpgtd %xmm3, %xmm6
-; SSSE3-NEXT: pand %xmm6, %xmm0
-; SSSE3-NEXT: pandn %xmm8, %xmm6
-; SSSE3-NEXT: por %xmm6, %xmm0
-; SSSE3-NEXT: pxor %xmm4, %xmm7
-; SSSE3-NEXT: pcmpgtd %xmm7, %xmm5
+; SSSE3-NEXT: pxor %xmm6, %xmm3
+; SSSE3-NEXT: movdqa %xmm5, %xmm7
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm7
+; SSSE3-NEXT: pand %xmm7, %xmm0
+; SSSE3-NEXT: pxor %xmm8, %xmm7
+; SSSE3-NEXT: psrld $16, %xmm7
+; SSSE3-NEXT: por %xmm7, %xmm0
+; SSSE3-NEXT: pxor %xmm4, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm6, %xmm5
 ; SSSE3-NEXT: pand %xmm5, %xmm4
-; SSSE3-NEXT: pandn %xmm8, %xmm5
+; SSSE3-NEXT: pxor %xmm8, %xmm5
+; SSSE3-NEXT: psrld $16, %xmm5
 ; SSSE3-NEXT: por %xmm4, %xmm5
 ; SSSE3-NEXT: pslld $16, %xmm5
 ; SSSE3-NEXT: psrad $16, %xmm5
@@ -1417,7 +1429,7 @@
 ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
 ; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovapd {{.*#+}} xmm3 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
 ; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT: vandpd %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
@@ -1907,42 +1919,46 @@
 define <8 x i8> @trunc_usat_v8i32_v8i8(<8 x i32> %a0) {
 ; SSE2-LABEL: trunc_usat_v8i32_v8i8:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255]
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: pxor %xmm3, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483903,2147483903,2147483903,2147483903]
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm6, %xmm1
-; SSE2-NEXT: pandn %xmm2, %xmm6
-; SSE2-NEXT: por %xmm1, %xmm6
-; SSE2-NEXT: pxor %xmm0, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483903,2147483903,2147483903,2147483903]
+; SSE2-NEXT: movdqa %xmm4, %xmm5
 ; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
-; SSE2-NEXT: pand %xmm5, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm5
-; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: packuswb %xmm6, %xmm0
+; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT: pxor %xmm3, %xmm5
+; SSE2-NEXT: psrld $24, %xmm5
+; SSE2-NEXT: por %xmm1, %xmm5
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: pxor %xmm3, %xmm4
+; SSE2-NEXT: psrld $24, %xmm4
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm5, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: trunc_usat_v8i32_v8i8:
 ; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
-; SSSE3-NEXT: movdqa %xmm1, %xmm4
-; SSSE3-NEXT: pxor %xmm3, %xmm4
-; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [2147483903,2147483903,2147483903,2147483903]
-; SSSE3-NEXT: movdqa %xmm5, %xmm6
-; SSSE3-NEXT: pcmpgtd %xmm4, %xmm6
-; SSSE3-NEXT: pand %xmm6, %xmm1
-; SSSE3-NEXT: pandn %xmm2, %xmm6
-; SSSE3-NEXT: por %xmm1, %xmm6
-; SSSE3-NEXT: pxor %xmm0, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm1, %xmm3
+; SSSE3-NEXT: pxor %xmm2, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483903,2147483903,2147483903,2147483903]
+; SSSE3-NEXT: movdqa %xmm4, %xmm5
 ; SSSE3-NEXT: pcmpgtd %xmm3, %xmm5
-; SSSE3-NEXT: pand %xmm5, %xmm0
-; SSSE3-NEXT: pandn %xmm2, %xmm5
-; SSSE3-NEXT: por %xmm5, %xmm0
-; SSSE3-NEXT: packuswb %xmm6, %xmm0
+; SSSE3-NEXT: pand %xmm5, %xmm1
+; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3
+; SSSE3-NEXT: pxor %xmm3, %xmm5
+; SSSE3-NEXT: psrld $24, %xmm5
+; SSSE3-NEXT: por %xmm1, %xmm5
+; SSSE3-NEXT: pxor %xmm0, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm0
+; SSSE3-NEXT: pxor %xmm3, %xmm4
+; SSSE3-NEXT: psrld $24, %xmm4
+; SSSE3-NEXT: por %xmm4, %xmm0
+; SSSE3-NEXT: packuswb %xmm5, %xmm0
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: trunc_usat_v8i32_v8i8:
@@ -2012,51 +2028,56 @@
 define void @trunc_usat_v8i32_v8i8_store(<8 x i32> %a0, <8 x i8> *%p1) {
 ; SSE2-LABEL: trunc_usat_v8i32_v8i8_store:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255]
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: pxor %xmm3, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483903,2147483903,2147483903,2147483903]
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm6, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm6
-; SSE2-NEXT: por %xmm0, %xmm6
-; SSE2-NEXT: pxor %xmm1, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483903,2147483903,2147483903,2147483903]
+; SSE2-NEXT: movdqa %xmm4, %xmm5
 ; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: pandn %xmm2, %xmm5
-; SSE2-NEXT: por %xmm1, %xmm5
-; SSE2-NEXT: pand %xmm2, %xmm5
-; SSE2-NEXT: pand %xmm2, %xmm6
-; SSE2-NEXT: packuswb %xmm5, %xmm6
-; SSE2-NEXT: packuswb %xmm6, %xmm6
-; SSE2-NEXT: movq %xmm6, (%rdi)
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT: pxor %xmm3, %xmm5
+; SSE2-NEXT: psrld $24, %xmm5
+; SSE2-NEXT: por %xmm0, %xmm5
+; SSE2-NEXT: pxor %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pxor %xmm3, %xmm4
+; SSE2-NEXT: psrld $24, %xmm4
+; SSE2-NEXT: por %xmm1, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm0, %xmm4
+; SSE2-NEXT: pand %xmm0, %xmm5
+; SSE2-NEXT: packuswb %xmm4, %xmm5
+; SSE2-NEXT: packuswb %xmm5, %xmm5
+; SSE2-NEXT: movq %xmm5, (%rdi)
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: trunc_usat_v8i32_v8i8_store:
 ; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
-; SSSE3-NEXT: movdqa %xmm0, %xmm4
-; SSSE3-NEXT: pxor %xmm3, %xmm4
-; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [2147483903,2147483903,2147483903,2147483903]
-; SSSE3-NEXT: movdqa %xmm5, %xmm6
-; SSSE3-NEXT: pcmpgtd %xmm4, %xmm6
-; SSSE3-NEXT: pand %xmm6, %xmm0
-; SSSE3-NEXT: pandn %xmm2, %xmm6
-; SSSE3-NEXT: por %xmm0, %xmm6
-; SSSE3-NEXT: pxor %xmm1, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
+; SSSE3-NEXT: pxor %xmm2, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483903,2147483903,2147483903,2147483903]
+; SSSE3-NEXT: movdqa %xmm4, %xmm5
 ; SSSE3-NEXT: pcmpgtd %xmm3, %xmm5
-; SSSE3-NEXT: pand %xmm5, %xmm1
-; SSSE3-NEXT: pandn %xmm2, %xmm5
-; SSSE3-NEXT: por %xmm1, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm0
+; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3
+; SSSE3-NEXT: pxor %xmm3, %xmm5
+; SSSE3-NEXT: psrld $24, %xmm5
+; SSSE3-NEXT: por %xmm0, %xmm5
+; SSSE3-NEXT: pxor %xmm1, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pxor %xmm3, %xmm4
+; SSSE3-NEXT: psrld $24, %xmm4
+; SSSE3-NEXT: por %xmm1, %xmm4
 ; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT: pshufb %xmm0, %xmm4
 ; SSSE3-NEXT: pshufb %xmm0, %xmm5
-; SSSE3-NEXT: pshufb %xmm0, %xmm6
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm5[0]
-; SSSE3-NEXT: pshufb {{.*#+}} xmm6 = xmm6[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
-; SSSE3-NEXT: movq %xmm6, (%rdi)
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm4[0]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm5 = xmm5[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: movq %xmm5, (%rdi)
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: trunc_usat_v8i32_v8i8_store:
@@ -2133,35 +2154,39 @@
 define <16 x i8> @trunc_usat_v16i32_v16i8(<16 x i32> %a0) {
 ; SSE2-LABEL: trunc_usat_v16i32_v16i8:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255]
-; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: pxor %xmm6, %xmm7
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm1, %xmm6
+; SSE2-NEXT: pxor %xmm8, %xmm6
 ; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483903,2147483903,2147483903,2147483903]
-; SSE2-NEXT: movdqa %xmm5, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm7, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: pandn %xmm8, %xmm4
-; SSE2-NEXT: por %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm6, %xmm1
 ; SSE2-NEXT: movdqa %xmm5, %xmm7
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm7
-; SSE2-NEXT: pand %xmm7, %xmm0
-; SSE2-NEXT: pandn %xmm8, %xmm7
-; SSE2-NEXT: por %xmm7, %xmm0
-; SSE2-NEXT: packuswb %xmm4, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm7
+; SSE2-NEXT: pand %xmm7, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm6, %xmm6
+; SSE2-NEXT: pxor %xmm6, %xmm7
+; SSE2-NEXT: psrld $24, %xmm7
+; SSE2-NEXT: por %xmm1, %xmm7
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm8, %xmm1
+; SSE2-NEXT: movdqa %xmm5, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: pxor %xmm6, %xmm4
+; SSE2-NEXT: psrld $24, %xmm4
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm7, %xmm0
 ; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pxor %xmm6, %xmm1
+; SSE2-NEXT: pxor %xmm8, %xmm1
 ; SSE2-NEXT: movdqa %xmm5, %xmm4
 ; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
 ; SSE2-NEXT: pand %xmm4, %xmm3
-; SSE2-NEXT: pandn %xmm8, %xmm4
+; SSE2-NEXT: pxor %xmm6, %xmm4
+; SSE2-NEXT: psrld $24, %xmm4
 ; SSE2-NEXT: por %xmm3, %xmm4
-; SSE2-NEXT: pxor %xmm2, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm6, %xmm5
+; SSE2-NEXT: pxor %xmm2, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm5
 ; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pandn %xmm8, %xmm5
+; SSE2-NEXT: pxor %xmm6, %xmm5
+; SSE2-NEXT: psrld $24, %xmm5
 ; SSE2-NEXT: por %xmm2, %xmm5
 ; SSE2-NEXT: packuswb %xmm4, %xmm5
 ; SSE2-NEXT: packuswb %xmm5, %xmm0
@@ -2169,35 +2194,39 @@
 ;
 ; SSSE3-LABEL: trunc_usat_v16i32_v16i8:
 ; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
-; SSSE3-NEXT: movdqa %xmm1, %xmm7
-; SSSE3-NEXT: pxor %xmm6, %xmm7
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm1, %xmm6
+; SSSE3-NEXT: pxor %xmm8, %xmm6
 ; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [2147483903,2147483903,2147483903,2147483903]
-; SSSE3-NEXT: movdqa %xmm5, %xmm4
-; SSSE3-NEXT: pcmpgtd %xmm7, %xmm4
-; SSSE3-NEXT: pand %xmm4, %xmm1
-; SSSE3-NEXT: pandn %xmm8, %xmm4
-; SSSE3-NEXT: por %xmm1, %xmm4
-; SSSE3-NEXT: movdqa %xmm0, %xmm1
-; SSSE3-NEXT: pxor %xmm6, %xmm1
 ; SSSE3-NEXT: movdqa %xmm5, %xmm7
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm7
-; SSSE3-NEXT: pand %xmm7, %xmm0
-; SSSE3-NEXT: pandn %xmm8, %xmm7
-; SSSE3-NEXT: por %xmm7, %xmm0
-; SSSE3-NEXT: packuswb %xmm4, %xmm0
+; SSSE3-NEXT: pcmpgtd %xmm6, %xmm7
+; SSSE3-NEXT: pand %xmm7, %xmm1
+; SSSE3-NEXT: pcmpeqd %xmm6, %xmm6
+; SSSE3-NEXT: pxor %xmm6, %xmm7
+; SSSE3-NEXT: psrld $24, %xmm7
+; SSSE3-NEXT: por %xmm1, %xmm7
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm8, %xmm1
+; SSSE3-NEXT: movdqa %xmm5, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm0
+; SSSE3-NEXT: pxor %xmm6, %xmm4
+; SSSE3-NEXT: psrld $24, %xmm4
+; SSSE3-NEXT: por %xmm4, %xmm0
+; SSSE3-NEXT: packuswb %xmm7, %xmm0
 ; SSSE3-NEXT: movdqa %xmm3, %xmm1
-; SSSE3-NEXT: pxor %xmm6, %xmm1
+; SSSE3-NEXT: pxor %xmm8, %xmm1
 ; SSSE3-NEXT: movdqa %xmm5, %xmm4
 ; SSSE3-NEXT: pcmpgtd %xmm1, %xmm4
 ; SSSE3-NEXT: pand %xmm4, %xmm3
-; SSSE3-NEXT: pandn %xmm8, %xmm4
+; SSSE3-NEXT: pxor %xmm6, %xmm4
+; SSSE3-NEXT: psrld $24, %xmm4
 ; SSSE3-NEXT: por %xmm3, %xmm4
-; SSSE3-NEXT: pxor %xmm2, %xmm6
-; SSSE3-NEXT: pcmpgtd %xmm6, %xmm5
+; SSSE3-NEXT: pxor %xmm2, %xmm8
+; SSSE3-NEXT: pcmpgtd %xmm8, %xmm5
 ; SSSE3-NEXT: pand %xmm5, %xmm2
-; SSSE3-NEXT: pandn %xmm8, %xmm5
+; SSSE3-NEXT: pxor %xmm6, %xmm5
+; SSSE3-NEXT: psrld $24, %xmm5
 ; SSSE3-NEXT: por %xmm2, %xmm5
 ; SSSE3-NEXT: packuswb %xmm4, %xmm5
 ; SSSE3-NEXT: packuswb %xmm5, %xmm0
Index: test/CodeGen/X86/vector-trunc.ll
===================================================================
--- test/CodeGen/X86/vector-trunc.ll
+++ test/CodeGen/X86/vector-trunc.ll
@@ -286,13 +286,13 @@
 ; AVX1-LABEL: trunc8i64_8i8:
 ; AVX1: # %bb.0: # %entry
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandpd %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandpd %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
@@ -907,13 +907,13 @@
 ; AVX1-LABEL: trunc16i32_16i8:
 ; AVX1: # %bb.0: # %entry
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vmovdqu %xmm0, (%rax)
Index: test/CodeGen/X86/vshift-6.ll
===================================================================
--- test/CodeGen/X86/vshift-6.ll
+++ test/CodeGen/X86/vshift-6.ll
@@ -50,8 +50,8 @@
 ; X32-NEXT: movdqa %xmm2, %xmm4
 ; X32-NEXT: pandn %xmm0, %xmm4
 ; X32-NEXT: psllw $2, %xmm0
-; X32-NEXT: pand {{\.LCPI.*}}, %xmm0
 ; X32-NEXT: pand %xmm2, %xmm0
+; X32-NEXT: pand {{\.LCPI.*}}, %xmm0
 ; X32-NEXT: por %xmm4, %xmm0
 ; X32-NEXT: paddb %xmm1, %xmm1
 ; X32-NEXT: pcmpgtb %xmm1, %xmm3
@@ -85,8 +85,8 @@
 ; X64-NEXT: movdqa %xmm2, %xmm4
 ; X64-NEXT: pandn %xmm0, %xmm4
 ; X64-NEXT: psllw $2, %xmm0
-; X64-NEXT: pand {{.*}}(%rip), %xmm0
 ; X64-NEXT: pand %xmm2, %xmm0
+; X64-NEXT: pand {{.*}}(%rip), %xmm0
 ; X64-NEXT: por %xmm4, %xmm0
 ; X64-NEXT: paddb %xmm1, %xmm1
 ; X64-NEXT: pcmpgtb %xmm1, %xmm3
Index: test/CodeGen/X86/x86-interleaved-access.ll
===================================================================
--- test/CodeGen/X86/x86-interleaved-access.ll
+++ test/CodeGen/X86/x86-interleaved-access.ll
@@ -1029,7 +1029,8 @@
 ; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm2
 ; AVX1-NEXT: vpalignr {{.*#+}} xmm9 = xmm7[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
 ; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm6[11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7,8,9,10]
-; AVX1-NEXT: vmovaps {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; AVX1-NEXT: # ymm5 = mem[0,1,0,1]
 ; AVX1-NEXT: vandnps %ymm2, %ymm5, %ymm2
 ; AVX1-NEXT: vandps %ymm5, %ymm8, %ymm5
 ; AVX1-NEXT: vorps %ymm2, %ymm5, %ymm2
@@ -1585,13 +1586,14 @@
 ; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm12[11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7,8,9,10]
 ; AVX1-NEXT: vpalignr {{.*#+}} xmm11 = xmm11[11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7,8,9,10]
 ; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm11, %ymm12
-; AVX1-NEXT: vmovaps {{.*#+}} ymm13 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm13 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; AVX1-NEXT: # ymm13 = mem[0,1,0,1]
 ; AVX1-NEXT: vandnps %ymm12, %ymm13, %ymm12
 ; AVX1-NEXT: vandps %ymm13, %ymm14, %ymm14
 ; AVX1-NEXT: vorps %ymm12, %ymm14, %ymm12
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm14
 ; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm15[11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7,8,9,10]
-; AVX1-NEXT: vmovdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT: vandnps %ymm14, %ymm13, %ymm14
 ; AVX1-NEXT: vandps %ymm13, %ymm7, %ymm7
 ; AVX1-NEXT: vorps %ymm14, %ymm7, %ymm13
@@ -1616,7 +1618,7 @@
 ; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpaddb %xmm0, %xmm4, %xmm1
 ; AVX1-NEXT: vextractf128 $1, %ymm12, %xmm0
-; AVX1-NEXT: vpaddb -{{[0-9]+}}(%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX1-NEXT: vpaddb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
 ; AVX1-NEXT: vpaddb %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT: vpaddb %xmm11, %xmm12, %xmm3
 ; AVX1-NEXT: vpaddb %xmm3, %xmm2, %xmm2
@@ -1732,22 +1734,22 @@
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm14
 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm15 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
-; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
-; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
-; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
 ; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm1
 ; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm12 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
-; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3],xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7]
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
-; AVX1-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm3
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm5[8],xmm7[8],xmm5[9],xmm7[9],xmm5[10],xmm7[10],xmm5[11],xmm7[11],xmm5[12],xmm7[12],xmm5[13],xmm7[13],xmm5[14],xmm7[14],xmm5[15],xmm7[15]
 ; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm4
@@ -1756,7 +1758,7 @@
 ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm15[0],xmm5[0],xmm15[1],xmm5[1],xmm15[2],xmm5[2],xmm15[3],xmm5[3]
 ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm11 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
 ; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm11, %ymm1
-; AVX1-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp) # 32-byte Spill
+; AVX1-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm9[0],xmm12[0],xmm9[1],xmm12[1],xmm9[2],xmm12[2],xmm9[3],xmm12[3]
 ; AVX1-NEXT: vmovdqa %xmm8, %xmm2
 ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3]
@@ -1765,16 +1767,16 @@
 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm9[4],xmm12[4],xmm9[5],xmm12[5],xmm9[6],xmm12[6],xmm9[7],xmm12[7]
 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7]
-; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3]
-; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
+; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
 ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
 ; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm9, %ymm14
-; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
-; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm11 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
-; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
 ; AVX1-NEXT: vinsertf128 $1, %xmm11, %ymm9, %ymm9
 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm11 = xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
@@ -1788,7 +1790,7 @@
 ; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm9, %ymm6
 ; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm9[2,3],ymm1[2,3]
 ; AVX1-NEXT: vinsertf128 $1, %xmm15, %ymm10, %ymm2
-; AVX1-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm3 # 32-byte Reload
+; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
 ; AVX1-NEXT: vinsertf128 $1, %xmm10, %ymm3, %ymm0
 ; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm3[2,3],ymm2[2,3]
 ; AVX1-NEXT: vinsertf128 $1, %xmm11, %ymm7, %ymm3