Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -33923,8 +33923,8 @@
   bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
 
   // Try to invert the condition if true value is not all 1s and false value is
-  // not all 0s.
-  if (!TValIsAllOnes && !FValIsAllZeros &&
+  // not all 0s. Only do this if the condition has one use.
+  if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
       // Check if the selector will be produced by CMPP*/PCMP*.
       Cond.getOpcode() == ISD::SETCC &&
       // Check if SETCC has already been promoted.
Index: llvm/trunk/test/CodeGen/X86/vselect.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/vselect.ll
+++ llvm/trunk/test/CodeGen/X86/vselect.ll
@@ -617,26 +617,23 @@
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [1,2,4,8]
 ; SSE-NEXT:    pand %xmm3, %xmm0
-; SSE-NEXT:    pcmpeqd %xmm0, %xmm3
-; SSE-NEXT:    pxor %xmm4, %xmm4
-; SSE-NEXT:    pcmpeqd %xmm0, %xmm4
-; SSE-NEXT:    pand %xmm1, %xmm4
-; SSE-NEXT:    pand %xmm2, %xmm3
-; SSE-NEXT:    movdqa %xmm4, (%rdi)
-; SSE-NEXT:    movdqa %xmm3, (%rsi)
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm0
+; SSE-NEXT:    movdqa %xmm0, %xmm3
+; SSE-NEXT:    pandn %xmm1, %xmm3
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    movdqa %xmm3, (%rdi)
+; SSE-NEXT:    movdqa %xmm0, (%rsi)
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: vselect_allzeros_LHS_multiple_use_setcc:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,2,4,8]
 ; AVX-NEXT:    vpand %xmm3, %xmm0, %xmm0
-; AVX-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm3
-; AVX-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX-NEXT:    vpcmpeqd %xmm4, %xmm0, %xmm0
-; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpand %xmm2, %xmm3, %xmm1
-; AVX-NEXT:    vmovdqa %xmm0, (%rdi)
-; AVX-NEXT:    vmovdqa %xmm1, (%rsi)
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vmovdqa %xmm1, (%rdi)
+; AVX-NEXT:    vmovdqa %xmm0, (%rsi)
 ; AVX-NEXT:    retq
   %and = and <4 x i32> %x, <i32 1, i32 2, i32 4, i32 8>
   %cond = icmp ne <4 x i32> %and, zeroinitializer
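For context on the new Cond.hasOneUse() guard: the combine above inverts a vector SETCC so that a select whose true operand is all zeros (and whose false operand is not) can be lowered as a plain AND of the inverted mask. When the same compare also feeds another select, that inversion forces a second compare to be materialized, which is the extra pxor/pcmpeqd pair visible in the old SSE and AVX output. Below is a minimal IR sketch of that multiple-use shape; it assumes the same [1,2,4,8] mask shown in the CHECK lines, and the function and value names are made up for illustration rather than copied from the test.

; One icmp feeds two selects: %s1 has an all-zeros true operand, %s2 an
; all-zeros false operand. With the one-use check in place, the compare is
; emitted once and both selects lower to pandn/pand of the same mask.
define void @two_selects_one_setcc(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z,
                                    <4 x i32>* %p1, <4 x i32>* %p2) {
  %m = and <4 x i32> %x, <i32 1, i32 2, i32 4, i32 8>
  %c = icmp ne <4 x i32> %m, zeroinitializer
  %s1 = select <4 x i1> %c, <4 x i32> zeroinitializer, <4 x i32> %y
  %s2 = select <4 x i1> %c, <4 x i32> %z, <4 x i32> zeroinitializer
  store <4 x i32> %s1, <4 x i32>* %p1
  store <4 x i32> %s2, <4 x i32>* %p2
  ret void
}

The one-use restriction gives up a possible win from inverting a lone select in exchange for not duplicating a shared compare, which is what the tighter instruction sequences in the updated CHECK lines reflect.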