Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -18099,6 +18099,15 @@
     }
   }
 
+  // If this is a SETNE against the signed minimum value, change it to SETGT.
+  // Otherwise we use PCMPEQ+invert.
+  APInt ConstValue;
+  if (Cond == ISD::SETNE &&
+      ISD::isConstantSplatVector(Op1.getNode(), ConstValue) &&
+      ConstValue.isMinSignedValue()) {
+    Cond = ISD::SETGT;
+  }
+
   // If both operands are known non-negative, then an unsigned compare is the
   // same as a signed compare and there's no need to flip signbits.
   // TODO: We could check for more general simplifications here since we're
Index: llvm/trunk/test/CodeGen/X86/vector-compare-simplify.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/vector-compare-simplify.ll
+++ llvm/trunk/test/CodeGen/X86/vector-compare-simplify.ll
@@ -334,3 +334,14 @@
   ret <4 x i32> %r
 }
 
+; Make sure we can efficiently handle ne smin by turning into sgt.
+define <4 x i32> @ne_smin(<4 x i32> %x) {
+; CHECK-LABEL: ne_smin:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pcmpgtd {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %cmp = icmp ne <4 x i32> %x, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
+  %r = sext <4 x i1> %cmp to <4 x i32>
+  ret <4 x i32> %r
+}
+
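The transform rests on a simple fact: no signed value is smaller than the signed minimum, so `x != INT_MIN` and `x > INT_MIN` select exactly the same lanes. Rewriting SETNE as SETGT lets the vector compare lower to a single PCMPGTD against the splat constant, instead of PCMPEQ followed by an invert. Below is a minimal standalone C++ sketch (not part of the patch, and not LLVM code) that spot-checks this equivalence for a handful of 32-bit values; the sample set is illustrative only.

```cpp
// Standalone illustration: for signed integers, x != INT_MIN holds exactly
// when x > INT_MIN, which is why a SETNE against a splat of the signed
// minimum can be rewritten as SETGT.
#include <cassert>
#include <cstdint>
#include <limits>

int main() {
  const int32_t Min = std::numeric_limits<int32_t>::min();
  const int32_t Samples[] = {Min, Min + 1, -1, 0, 1,
                             std::numeric_limits<int32_t>::max()};
  for (int32_t X : Samples) {
    // Nothing compares strictly less than Min, so "not equal to Min" and
    // "greater than Min" pick out the same values.
    assert((X != Min) == (X > Min));
  }
  return 0;
}
```

The new ne_smin test reflects this: the CHECK lines expect a single `pcmpgtd` against a memory-resident splat of -2147483648, with no PCMPEQ or XOR in the output.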