Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -26791,9 +26791,49 @@
   return Cond;
 }
 
+/// Turn vector tests of the signbit in the form of:
+/// xor (sra X, elt_size(X)-1), -1
+/// into:
+/// pcmpgt X, -1
+static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
+                                         const X86Subtarget &Subtarget) {
+  EVT VT = N->getValueType(0);
+  // TODO: AVX2 can handle 256-bit integer vectors.
+  if (!VT.is128BitVector() || !Subtarget.hasSSE2())
+    return SDValue();
+
+  if (VT == MVT::v2i64 && !Subtarget.hasSSE42())
+    return SDValue();
+
+  // There must be a shift right algebraic before the xor, and the xor must be
+  // a 'not' operation.
+  SDValue Shift = N->getOperand(0);
+  SDValue Ones = N->getOperand(1);
+  if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
+      !ISD::isBuildVectorAllOnes(Ones.getNode()))
+    return SDValue();
+
+  // The shift should be smearing the sign bit across each vector element.
+  auto *ShiftBV = dyn_cast<BuildVectorSDNode>(Shift.getOperand(1));
+  if (!ShiftBV)
+    return SDValue();
+
+  EVT ShiftEltTy = Shift.getValueType().getVectorElementType();
+  auto *ShiftAmt = ShiftBV->getConstantSplatNode();
+  if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1)
+    return SDValue();
+
+  // Create a greater-than comparison against -1. We don't use the more obvious
+  // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
+  return DAG.getNode(X86ISD::PCMPGT, SDLoc(N), VT, Shift.getOperand(0), Ones);
+}
+
 static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget &Subtarget) {
+  if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
+    return Cmp;
+
   if (DCI.isBeforeLegalizeOps())
     return SDValue();
 
Index: test/CodeGen/X86/vector-pcmp.ll
===================================================================
--- test/CodeGen/X86/vector-pcmp.ll
+++ test/CodeGen/X86/vector-pcmp.ll
@@ -9,18 +9,14 @@
 define <16 x i8> @pcmpgtb(<16 x i8> %x) {
 ; SSE-LABEL: pcmpgtb:
 ; SSE:       # BB#0:
-; SSE-NEXT:    pxor %xmm1, %xmm1
-; SSE-NEXT:    pcmpgtb %xmm0, %xmm1
-; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
-; SSE-NEXT:    pxor %xmm1, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE-NEXT:    pcmpgtb %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: pcmpgtb:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %sign = ashr <16 x i8> %x, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
   %not = xor <16 x i8> %sign, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -30,16 +26,14 @@
 define <8 x i16> @pcmpgtw(<8 x i16> %x) {
 ; SSE-LABEL: pcmpgtw:
 ; SSE:       # BB#0:
-; SSE-NEXT:    psraw $15, %xmm0
 ; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE-NEXT:    pxor %xmm1, %xmm0
+; SSE-NEXT:    pcmpgtw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: pcmpgtw:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vpsraw $15, %xmm0, %xmm0
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %sign = ashr <8 x i16> %x, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
   %not = xor <8 x i16> %sign, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -49,16 +43,14 @@
 define <4 x i32> @pcmpgtd(<4 x i32> %x) {
 ; SSE-LABEL: pcmpgtd:
 ; SSE:       # BB#0:
-; SSE-NEXT:    psrad $31, %xmm0
 ; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE-NEXT:    pxor %xmm1, %xmm0
+; SSE-NEXT:    pcmpgtd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: pcmpgtd:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %sign = ashr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
   %not = xor <4 x i32> %sign, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -66,20 +58,24 @@
 define <2 x i64> @pcmpgtq(<2 x i64> %x) {
-; SSE-LABEL: pcmpgtq:
-; SSE:       # BB#0:
-; SSE-NEXT:    psrad $31, %xmm0
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
-; SSE-NEXT:    pxor %xmm1, %xmm0
-; SSE-NEXT:    retq
+; SSE2-LABEL: pcmpgtq:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    psrad $31, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT:    pxor %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE42-LABEL: pcmpgtq:
+; SSE42:       # BB#0:
+; SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE42-NEXT:    pcmpgtq %xmm1, %xmm0
+; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: pcmpgtq:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %sign = ashr <2 x i64> %x, <i64 63, i64 63>
   %not = xor <2 x i64> %sign, <i64 -1, i64 -1>
   ret <2 x i64> %not
 }
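
For reference, the fold is a pure logic identity: ashr X, elt_size(X)-1 smears each lane's sign bit, yielding all-ones for a negative lane and all-zeros otherwise, and xor'ing with -1 inverts that mask. The inverted mask is all-ones exactly when a lane is greater than -1, which SSE expresses directly as a pcmpgt against an all-ones register. A minimal IR sketch of the <4 x i32> case (the function name is hypothetical, not part of the patch), with the lane arithmetic worked out in comments:

; Sketch only, not part of the patch: the idiom the combine matches.
; Lane x =  5: ashr 31 -> 0,  xor -1 -> -1 (true);  and (5 > -1)  is -1.
; Lane x = -5: ashr 31 -> -1, xor -1 -> 0  (false); and (-5 > -1) is 0.
; So the two-instruction sequence is equivalent to one pcmpgtd vs. all-ones.
define <4 x i32> @signbit_clear(<4 x i32> %x) {
  %sign = ashr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
  %not = xor <4 x i32> %sign, <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %not
}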