Index: llvm/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -45805,6 +45805,31 @@
   if (VT != Op1.getValueType() || !VT.isSimple() || !VT.isInteger())
     return SDValue();
 
+  // Try to convert an "is positive" signbit masking operation into arithmetic
+  // shift and "andn". This saves a materialization of a -1 vector constant.
+  // The "is negative" variant should be handled more generally because it only
+  // requires "and" rather than "andn":
+  // and (pcmpgt X, -1), Y --> pandn (sra X, BitWidth - 1), Y
+  if (supportedVectorShiftWithImm(VT.getSimpleVT(), Subtarget, ISD::SRA)) {
+    SDValue X, Y;
+    if (Op1.hasOneUse() && Op1.getOpcode() == X86ISD::PCMPGT &&
+        isAllOnesOrAllOnesSplat(Op1.getOperand(1))) {
+      X = Op1.getOperand(0);
+      Y = Op0;
+    } else if (Op0.hasOneUse() && Op0.getOpcode() == X86ISD::PCMPGT &&
+               isAllOnesOrAllOnesSplat(Op0.getOperand(1))) {
+      X = Op0.getOperand(0);
+      Y = Op1;
+    }
+    if (X && Y) {
+      SDLoc DL(N);
+      SDValue Sra =
+          getTargetVShiftByConstNode(X86ISD::VSRAI, DL, VT.getSimpleVT(), X,
+                                     VT.getScalarSizeInBits() - 1, DAG);
+      return DAG.getNode(X86ISD::ANDNP, DL, VT, Sra, Y);
+    }
+  }
+
   APInt SplatVal;
   if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
       !SplatVal.isMask())
Index: llvm/test/CodeGen/X86/vector-pcmp.ll
===================================================================
--- llvm/test/CodeGen/X86/vector-pcmp.ll
+++ llvm/test/CodeGen/X86/vector-pcmp.ll
@@ -587,16 +587,14 @@
 define <4 x i32> @not_signbit_mask_v4i32(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: not_signbit_mask_v4i32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
-; SSE-NEXT:    pcmpgtd %xmm2, %xmm0
-; SSE-NEXT:    pand %xmm1, %xmm0
+; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    pandn %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: not_signbit_mask_v4i32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vpcmpgtd %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %sh = ashr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
   %not = xor <4 x i32> %sh, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -607,16 +605,14 @@
 define <8 x i16> @not_signbit_mask_v8i16(<8 x i16> %x, <8 x i16> %y) {
 ; SSE-LABEL: not_signbit_mask_v8i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
-; SSE-NEXT:    pcmpgtw %xmm2, %xmm0
-; SSE-NEXT:    pand %xmm1, %xmm0
+; SSE-NEXT:    psraw $15, %xmm0
+; SSE-NEXT:    pandn %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: not_signbit_mask_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vpcmpgtw %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vpand %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpsraw $15, %xmm0, %xmm0
+; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %sh = ashr <8 x i16> %x, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
   %not = xor <8 x i16> %sh, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -690,11 +686,10 @@
 define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %x, <8 x i32> %y) {
 ; SSE-LABEL: not_signbit_mask_v8i32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm4, %xmm4
-; SSE-NEXT:    pcmpgtd %xmm4, %xmm1
-; SSE-NEXT:    pcmpgtd %xmm4, %xmm0
-; SSE-NEXT:    pand %xmm2, %xmm0
-; SSE-NEXT:    pand %xmm3, %xmm1
+; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    pandn %xmm2, %xmm0
+; SSE-NEXT:    psrad $31, %xmm1
+; SSE-NEXT:    pandn %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: not_signbit_mask_v8i32:
@@ -708,9 +703,8 @@
 ;
 ; AVX2-LABEL: not_signbit_mask_v8i32:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX2-NEXT:    vpcmpgtd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
+; AVX2-NEXT:    vpandn %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %sh = ashr <8 x i32> %x, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
   %not = xor <8 x i32> %sh, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -721,11 +715,10 @@
 define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %x, <16 x i16> %y) {
 ; SSE-LABEL: not_signbit_mask_v16i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm4, %xmm4
-; SSE-NEXT:    pcmpgtw %xmm4, %xmm1
-; SSE-NEXT:    pcmpgtw %xmm4, %xmm0
-; SSE-NEXT:    pand %xmm2, %xmm0
-; SSE-NEXT:    pand %xmm3, %xmm1
+; SSE-NEXT:    psraw $15, %xmm0
+; SSE-NEXT:    pandn %xmm2, %xmm0
+; SSE-NEXT:    psraw $15, %xmm1
+; SSE-NEXT:    pandn %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: not_signbit_mask_v16i16:
@@ -739,9 +732,8 @@
 ;
 ; AVX2-LABEL: not_signbit_mask_v16i16:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX2-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpand %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vpsraw $15, %ymm0, %ymm0
+; AVX2-NEXT:    vpandn %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %sh = ashr <16 x i16> %x, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
   %not = xor <16 x i16> %sh, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -812,16 +804,14 @@
 define <4 x i32> @is_positive_mask_v4i32(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: is_positive_mask_v4i32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
-; SSE-NEXT:    pcmpgtd %xmm2, %xmm0
-; SSE-NEXT:    pand %xmm1, %xmm0
+; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    pandn %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: is_positive_mask_v4i32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vpcmpgtd %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vpand %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %cmp = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
   %mask = sext <4 x i1> %cmp to <4 x i32>
@@ -832,16 +822,14 @@
 define <8 x i16> @is_positive_mask_v8i16(<8 x i16> %x, <8 x i16> %y) {
 ; SSE-LABEL: is_positive_mask_v8i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
-; SSE-NEXT:    pcmpgtw %xmm2, %xmm0
-; SSE-NEXT:    pand %xmm1, %xmm0
+; SSE-NEXT:    psraw $15, %xmm0
+; SSE-NEXT:    pandn %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: is_positive_mask_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vpcmpgtw %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsraw $15, %xmm0, %xmm0
+; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %cmp = icmp sgt <8 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %mask = sext <8 x i1> %cmp to <8 x i16>
@@ -915,11 +903,10 @@
 define <8 x i32> @is_positive_mask_v8i32(<8 x i32> %x, <8 x i32> %y) {
 ; SSE-LABEL: is_positive_mask_v8i32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm4, %xmm4
-; SSE-NEXT:    pcmpgtd %xmm4, %xmm1
-; SSE-NEXT:    pcmpgtd %xmm4, %xmm0
-; SSE-NEXT:    pand %xmm2, %xmm0
-; SSE-NEXT:    pand %xmm3, %xmm1
+; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    pandn %xmm2, %xmm0
+; SSE-NEXT:    psrad $31, %xmm1
+; SSE-NEXT:    pandn %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: is_positive_mask_v8i32:
@@ -934,9 +921,8 @@
 ;
 ; AVX2-LABEL: is_positive_mask_v8i32:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX2-NEXT:    vpcmpgtd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpand %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
+; AVX2-NEXT:    vpandn %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %cmp = icmp sgt <8 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
   %mask = sext <8 x i1> %cmp to <8 x i32>
@@ -947,11 +933,10 @@
 define <16 x i16> @is_positive_mask_v16i16(<16 x i16> %x, <16 x i16> %y) {
 ; SSE-LABEL: is_positive_mask_v16i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm4, %xmm4
-; SSE-NEXT:    pcmpgtw %xmm4, %xmm1
-; SSE-NEXT:    pcmpgtw %xmm4, %xmm0
-; SSE-NEXT:    pand %xmm2, %xmm0
-; SSE-NEXT:    pand %xmm3, %xmm1
+; SSE-NEXT:    psraw $15, %xmm0
+; SSE-NEXT:    pandn %xmm2, %xmm0
+; SSE-NEXT:    psraw $15, %xmm1
+; SSE-NEXT:    pandn %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: is_positive_mask_v16i16:
@@ -966,9 +951,8 @@
 ;
 ; AVX2-LABEL: is_positive_mask_v16i16:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX2-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsraw $15, %ymm0, %ymm0
+; AVX2-NEXT:    vpandn %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %cmp = icmp sgt <16 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %mask = sext <16 x i1> %cmp to <16 x i16>
@@ -1040,16 +1024,14 @@
 define <4 x i32> @is_positive_mask_load_v4i32(<4 x i32> %x, <4 x i32>* %p) {
 ; SSE-LABEL: is_positive_mask_load_v4i32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE-NEXT:    pcmpgtd %xmm1, %xmm0
-; SSE-NEXT:    pand (%rdi), %xmm0
+; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    pandn (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: is_positive_mask_load_v4i32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpand (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT:    vpandn (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %cmp = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
   %mask = sext <4 x i1> %cmp to <4 x i32>
@@ -1061,16 +1043,14 @@
 define <8 x i16> @is_positive_mask_load_v8i16(<8 x i16> %x, <8 x i16>* %p) {
 ; SSE-LABEL: is_positive_mask_load_v8i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE-NEXT:    pcmpgtw %xmm1, %xmm0
-; SSE-NEXT:    pand (%rdi), %xmm0
+; SSE-NEXT:    psraw $15, %xmm0
+; SSE-NEXT:    pandn (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: is_positive_mask_load_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpand (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    vpsraw $15, %xmm0, %xmm0
+; AVX-NEXT:    vpandn (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %cmp = icmp sgt <8 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %mask = sext <8 x i1> %cmp to <8 x i16>
@@ -1147,11 +1127,10 @@
 define <8 x i32> @is_positive_mask_load_v8i32(<8 x i32> %x, <8 x i32>* %p) {
 ; SSE-LABEL: is_positive_mask_load_v8i32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
-; SSE-NEXT:    pcmpgtd %xmm2, %xmm1
-; SSE-NEXT:    pcmpgtd %xmm2, %xmm0
-; SSE-NEXT:    pand (%rdi), %xmm0
-; SSE-NEXT:    pand 16(%rdi), %xmm1
+; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    pandn (%rdi), %xmm0
+; SSE-NEXT:    psrad $31, %xmm1
+; SSE-NEXT:    pandn 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: is_positive_mask_load_v8i32:
@@ -1166,9 +1145,8 @@
 ;
 ; AVX2-LABEL: is_positive_mask_load_v8i32:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX2-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpand (%rdi), %ymm0, %ymm0
+; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
+; AVX2-NEXT:    vpandn (%rdi), %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %cmp = icmp sgt <8 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
   %mask = sext <8 x i1> %cmp to <8 x i32>
@@ -1180,11 +1158,10 @@
 define <16 x i16> @is_positive_mask_load_v16i16(<16 x i16> %x, <16 x i16>* %p) {
 ; SSE-LABEL: is_positive_mask_load_v16i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
-; SSE-NEXT:    pcmpgtw %xmm2, %xmm1
-; SSE-NEXT:    pcmpgtw %xmm2, %xmm0
-; SSE-NEXT:    pand (%rdi), %xmm0
-; SSE-NEXT:    pand 16(%rdi), %xmm1
+; SSE-NEXT:    psraw $15, %xmm0
+; SSE-NEXT:    pandn (%rdi), %xmm0
+; SSE-NEXT:    psraw $15, %xmm1
+; SSE-NEXT:    pandn 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: is_positive_mask_load_v16i16:
@@ -1199,9 +1176,8 @@
 ;
 ; AVX2-LABEL: is_positive_mask_load_v16i16:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX2-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpand (%rdi), %ymm0, %ymm0
+; AVX2-NEXT:    vpsraw $15, %ymm0, %ymm0
+; AVX2-NEXT:    vpandn (%rdi), %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %cmp = icmp sgt <16 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %mask = sext <16 x i1> %cmp to <16 x i16>
Index: llvm/test/CodeGen/X86/vselect-zero.ll
===================================================================
--- llvm/test/CodeGen/X86/vselect-zero.ll
+++ llvm/test/CodeGen/X86/vselect-zero.ll
@@ -793,23 +793,20 @@
 define <8 x i16> @not_signbit_mask_v8i16(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: not_signbit_mask_v8i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
-; SSE-NEXT:    pcmpgtw %xmm2, %xmm0
-; SSE-NEXT:    pand %xmm1, %xmm0
+; SSE-NEXT:    psraw $15, %xmm0
+; SSE-NEXT:    pandn %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: not_signbit_mask_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vpcmpgtw %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsraw $15, %xmm0, %xmm0
+; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: not_signbit_mask_v8i16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512-NEXT:    vpcmpgtw %xmm2, %xmm0, %xmm0
-; AVX512-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpsraw $15, %xmm0, %xmm0
+; AVX512-NEXT:    vpandn %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %cond = icmp sgt <8 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %r = select <8 x i1> %cond, <8 x i16> %b, <8 x i16> zeroinitializer
@@ -819,23 +816,20 @@
 define <4 x i32> @not_signbit_mask_v4i32(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: not_signbit_mask_v4i32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
-; SSE-NEXT:    pcmpgtd %xmm2, %xmm0
-; SSE-NEXT:    pand %xmm1, %xmm0
+; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    pandn %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: not_signbit_mask_v4i32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vpcmpgtd %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: not_signbit_mask_v4i32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512-NEXT:    vpcmpgtd %xmm2, %xmm0, %xmm0
-; AVX512-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpsrad $31, %xmm0, %xmm0
+; AVX512-NEXT:    vpandn %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %cond = icmp sgt <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
   %r = select <4 x i1> %cond, <4 x i32> %b, <4 x i32> zeroinitializer
@@ -867,9 +861,8 @@
 ;
 ; AVX512-LABEL: not_signbit_mask_v2i64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm0
-; AVX512-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpsraq $63, %xmm0, %xmm0
+; AVX512-NEXT:    vpandn %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %cond = icmp sgt <2 x i64> %a, <i64 -1, i64 -1>
   %r = select <2 x i1> %cond, <2 x i64> %b, <2 x i64> zeroinitializer
@@ -917,11 +910,10 @@
 define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %a, <16 x i16> %b) {
 ; SSE-LABEL: not_signbit_mask_v16i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm4, %xmm4
-; SSE-NEXT:    pcmpgtw %xmm4, %xmm0
-; SSE-NEXT:    pand %xmm2, %xmm0
-; SSE-NEXT:    pcmpgtw %xmm4, %xmm1
-; SSE-NEXT:    pand %xmm3, %xmm1
+; SSE-NEXT:    psraw $15, %xmm0
+; SSE-NEXT:    pandn %xmm2, %xmm0
+; SSE-NEXT:    psraw $15, %xmm1
+; SSE-NEXT:    pandn %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: not_signbit_mask_v16i16:
@@ -935,16 +927,14 @@
 ;
 ; AVX2-LABEL: not_signbit_mask_v16i16:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX2-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsraw $15, %ymm0, %ymm0
+; AVX2-NEXT:    vpandn %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: not_signbit_mask_v16i16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm0
-; AVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpsraw $15, %ymm0, %ymm0
+; AVX512-NEXT:    vpandn %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %cond = icmp sgt <16 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %r = select <16 x i1> %cond, <16 x i16> %b, <16 x i16> zeroinitializer
@@ -954,11 +944,10 @@
 define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %a, <8 x i32> %b) {
 ; SSE-LABEL: not_signbit_mask_v8i32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm4, %xmm4
-; SSE-NEXT:    pcmpgtd %xmm4, %xmm0
-; SSE-NEXT:    pand %xmm2, %xmm0
-; SSE-NEXT:    pcmpgtd %xmm4, %xmm1
-; SSE-NEXT:    pand %xmm3, %xmm1
+; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    pandn %xmm2, %xmm0
+; SSE-NEXT:    psrad $31, %xmm1
+; SSE-NEXT:    pandn %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: not_signbit_mask_v8i32:
@@ -972,16 +961,14 @@
 ;
 ; AVX2-LABEL: not_signbit_mask_v8i32:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX2-NEXT:    vpcmpgtd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
+; AVX2-NEXT:    vpandn %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: not_signbit_mask_v8i32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512-NEXT:    vpcmpgtd %ymm2, %ymm0, %ymm0
-; AVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpsrad $31, %ymm0, %ymm0
+; AVX512-NEXT:    vpandn %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %cond = icmp sgt <8 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
   %r = select <8 x i1> %cond, <8 x i32> %b, <8 x i32> zeroinitializer
@@ -1028,9 +1015,8 @@
 ;
 ; AVX512-LABEL: not_signbit_mask_v4i64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512-NEXT:    vpcmpgtq %ymm2, %ymm0, %ymm0
-; AVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpsraq $63, %ymm0, %ymm0
+; AVX512-NEXT:    vpandn %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %cond = icmp sgt <4 x i64> %a, <i64 -1, i64 -1, i64 -1, i64 -1>
   %r = select <4 x i1> %cond, <4 x i64> %b, <4 x i64> zeroinitializer
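
A minimal standalone reproducer for the pattern this combine targets, distilled from the is_positive_mask_v4i32 test above (the function name below is illustrative, not taken from the test files). Fed to llc with an x86_64 triple, it should now select an arithmetic shift plus pandn instead of materializing an all-ones vector for pcmpgt:

define <4 x i32> @keep_nonnegative_lanes(<4 x i32> %x, <4 x i32> %y) {
  ; Keep the lanes of %y where the sign bit of %x is clear.
  %cmp = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  %mask = sext <4 x i1> %cmp to <4 x i32>
  %and = and <4 x i32> %mask, %y
  ret <4 x i32> %and
}

With this patch the SSE lowering is "psrad $31, %xmm0" followed by "pandn %xmm1, %xmm0", matching the updated CHECK lines in vector-pcmp.ll.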