diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -54441,21 +54441,24 @@
     if (SDValue AndN = MatchAndCmpEq(RHS, LHS))
       return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
 
-    // cmpeq(trunc(x),0) --> cmpeq(x,0)
-    // cmpne(trunc(x),0) --> cmpne(x,0)
+    // cmpeq(trunc(x),C) --> cmpeq(x,C)
+    // cmpne(trunc(x),C) --> cmpne(x,C)
     // iff x upper bits are zero.
-    // TODO: Add support for RHS to be truncate as well?
     if (LHS.getOpcode() == ISD::TRUNCATE &&
         LHS.getOperand(0).getScalarValueSizeInBits() >= 32 &&
-        isNullConstant(RHS) && !DCI.isBeforeLegalize()) {
+        isa<ConstantSDNode>(RHS) && !DCI.isBeforeLegalize()) {
       EVT SrcVT = LHS.getOperand(0).getValueType();
       APInt UpperBits = APInt::getBitsSetFrom(SrcVT.getScalarSizeInBits(),
                                               OpVT.getScalarSizeInBits());
       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+      ConstantSDNode *C = cast<ConstantSDNode>(RHS);
       if (DAG.MaskedValueIsZero(LHS.getOperand(0), UpperBits) &&
           TLI.isTypeLegal(LHS.getOperand(0).getValueType()))
         return DAG.getSetCC(DL, VT, LHS.getOperand(0),
-                            DAG.getConstant(0, DL, SrcVT), CC);
+                            DAG.getConstant(C->getAPIntValue().zextOrTrunc(
+                                                SrcVT.getScalarSizeInBits()),
+                                            DL, SrcVT),
+                            CC);
     }
 
     // With C as a power of 2 and C != 0 and C != INT_MIN:
diff --git a/llvm/test/CodeGen/X86/movmsk-cmp.ll b/llvm/test/CodeGen/X86/movmsk-cmp.ll
--- a/llvm/test/CodeGen/X86/movmsk-cmp.ll
+++ b/llvm/test/CodeGen/X86/movmsk-cmp.ll
@@ -10,14 +10,14 @@
 ; SSE-LABEL: allones_v16i8_sign:
 ; SSE: # %bb.0:
 ; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: allones_v16i8_sign:
 ; AVX: # %bb.0:
 ; AVX-NEXT: vpmovmskb %xmm0, %eax
-; AVX-NEXT: cmpw $-1, %ax
+; AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX-NEXT: sete %al
 ; AVX-NEXT: retq
 %tmp = icmp slt <16 x i8> %arg, zeroinitializer
@@ -51,7 +51,7 @@
 ; SSE: # %bb.0:
 ; SSE-NEXT: pand %xmm1, %xmm0
 ; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -60,7 +60,7 @@
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT: sete %al
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -133,7 +133,7 @@
 ; SSE-NEXT: pand %xmm3, %xmm1
 ; SSE-NEXT: pand %xmm0, %xmm1
 ; SSE-NEXT: pmovmskb %xmm1, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -145,7 +145,7 @@
 ; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT: vpand %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT: sete %al
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -322,7 +322,7 @@
 ; SSE: # %bb.0:
 ; SSE-NEXT: packsswb %xmm1, %xmm0
 ; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -331,7 +331,7 @@
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT: sete %al
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -341,7 +341,7 @@
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: cmpw $-1, %ax
+; AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX2-NEXT: sete %al
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
@@ -428,7 +428,7 @@
 ; SSE-NEXT: packsswb %xmm3, %xmm2
 ; SSE-NEXT: pand %xmm0, %xmm2
 ; SSE-NEXT: pmovmskb %xmm2, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -440,7 +440,7 @@
 ; AVX1-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT: sete %al
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -549,14 +549,14 @@
 ; SSE-LABEL: allones_v4i32_sign:
 ; SSE: # %bb.0:
 ; SSE-NEXT: movmskps %xmm0, %eax
-; SSE-NEXT: cmpb $15, %al
+; SSE-NEXT: cmpl $15, %eax
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: allones_v4i32_sign:
 ; AVX: # %bb.0:
 ; AVX-NEXT: vmovmskps %xmm0, %eax
-; AVX-NEXT: cmpb $15, %al
+; AVX-NEXT: cmpl $15, %eax
 ; AVX-NEXT: sete %al
 ; AVX-NEXT: retq
 %tmp = icmp slt <4 x i32> %arg, zeroinitializer
@@ -597,7 +597,7 @@
 ; AVX-LABEL: allones_v8i32_sign:
 ; AVX: # %bb.0:
 ; AVX-NEXT: vmovmskps %ymm0, %eax
-; AVX-NEXT: cmpb $-1, %al
+; AVX-NEXT: cmpl $255, %eax
 ; AVX-NEXT: sete %al
 ; AVX-NEXT: vzeroupper
 ; AVX-NEXT: retq
@@ -635,7 +635,7 @@
 ; SSE-NEXT: packssdw %xmm1, %xmm0
 ; SSE-NEXT: packsswb %xmm2, %xmm0
 ; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -647,7 +647,7 @@
 ; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT: sete %al
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -749,14 +749,14 @@
 ; SSE: # %bb.0:
 ; SSE-NEXT: packssdw %xmm1, %xmm0
 ; SSE-NEXT: movmskps %xmm0, %eax
-; SSE-NEXT: cmpb $15, %al
+; SSE-NEXT: cmpl $15, %eax
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: allones_v4i64_sign:
 ; AVX: # %bb.0:
 ; AVX-NEXT: vmovmskpd %ymm0, %eax
-; AVX-NEXT: cmpb $15, %al
+; AVX-NEXT: cmpl $15, %eax
 ; AVX-NEXT: sete %al
 ; AVX-NEXT: vzeroupper
 ; AVX-NEXT: retq
@@ -816,7 +816,7 @@
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: vmovmskps %ymm0, %eax
-; AVX2-NEXT: cmpb $-1, %al
+; AVX2-NEXT: cmpl $255, %eax
 ; AVX2-NEXT: sete %al
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
@@ -903,7 +903,7 @@
 ; SSE: # %bb.0:
 ; SSE-NEXT: psllw $7, %xmm0
 ; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -911,7 +911,7 @@
 ; AVX1OR2: # %bb.0:
 ; AVX1OR2-NEXT: vpsllw $7, %xmm0, %xmm0
 ; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax
-; AVX1OR2-NEXT: cmpw $-1, %ax
+; AVX1OR2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1OR2-NEXT: sete %al
 ; AVX1OR2-NEXT: retq
 ;
@@ -919,7 +919,7 @@
 ; KNL: # %bb.0:
 ; KNL-NEXT: vpsllw $7, %xmm0, %xmm0
 ; KNL-NEXT: vpmovmskb %xmm0, %eax
-; KNL-NEXT: cmpw $-1, %ax
+; KNL-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; KNL-NEXT: sete %al
 ; KNL-NEXT: retq
 ;
@@ -1104,7 +1104,7 @@
 ; SSE-NEXT: pand %xmm1, %xmm0
 ; SSE-NEXT: psllw $7, %xmm0
 ; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -1114,7 +1114,7 @@
 ; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
 ; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT: sete %al
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -1189,7 +1189,7 @@
 ; SSE-NEXT: pand %xmm0, %xmm1
 ; SSE-NEXT: psllw $7, %xmm1
 ; SSE-NEXT: pmovmskb %xmm1, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -1202,7 +1202,7 @@
 ; AVX1-NEXT: vpand %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
 ; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT: sete %al
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -1371,7 +1371,7 @@
 ; SSE-NEXT: psllw $15, %xmm0
 ; SSE-NEXT: packsswb %xmm1, %xmm0
 ; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -1382,7 +1382,7 @@
 ; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0
 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT: sete %al
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -1393,7 +1393,7 @@
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: cmpw $-1, %ax
+; AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX2-NEXT: sete %al
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
@@ -1434,7 +1434,7 @@
 ; SSE-NEXT: packsswb %xmm3, %xmm2
 ; SSE-NEXT: pand %xmm0, %xmm2
 ; SSE-NEXT: pmovmskb %xmm2, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -1450,7 +1450,7 @@
 ; AVX1-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT: sete %al
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -1585,7 +1585,7 @@
 ; SSE: # %bb.0:
 ; SSE-NEXT: pslld $31, %xmm0
 ; SSE-NEXT: movmskps %xmm0, %eax
-; SSE-NEXT: cmpb $15, %al
+; SSE-NEXT: cmpl $15, %eax
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -1593,7 +1593,7 @@
 ; AVX1OR2: # %bb.0:
 ; AVX1OR2-NEXT: vpslld $31, %xmm0, %xmm0
 ; AVX1OR2-NEXT: vmovmskps %xmm0, %eax
-; AVX1OR2-NEXT: cmpb $15, %al
+; AVX1OR2-NEXT: cmpl $15, %eax
 ; AVX1OR2-NEXT: sete %al
 ; AVX1OR2-NEXT: retq
 ;
@@ -1689,7 +1689,7 @@
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpslld $31, %ymm0, %ymm0
 ; AVX2-NEXT: vmovmskps %ymm0, %eax
-; AVX2-NEXT: cmpb $-1, %al
+; AVX2-NEXT: cmpl $255, %eax
 ; AVX2-NEXT: sete %al
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
@@ -1775,7 +1775,7 @@
 ; SSE-NEXT: packssdw %xmm1, %xmm0
 ; SSE-NEXT: packsswb %xmm2, %xmm0
 ; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -1791,7 +1791,7 @@
 ; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT: sete %al
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -1880,7 +1880,7 @@
 ; SSE: # %bb.0:
 ; SSE-NEXT: psllq $63, %xmm0
 ; SSE-NEXT: movmskpd %xmm0, %eax
-; SSE-NEXT: cmpb $3, %al
+; SSE-NEXT: cmpl $3, %eax
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -1888,7 +1888,7 @@
 ; AVX1OR2: # %bb.0:
 ; AVX1OR2-NEXT: vpsllq $63, %xmm0, %xmm0
 ; AVX1OR2-NEXT: vmovmskpd %xmm0, %eax
-; AVX1OR2-NEXT: cmpb $3, %al
+; AVX1OR2-NEXT: cmpl $3, %eax
 ; AVX1OR2-NEXT: sete %al
 ; AVX1OR2-NEXT: retq
 ;
@@ -1964,7 +1964,7 @@
 ; SSE-NEXT: psllq $63, %xmm0
 ; SSE-NEXT: packssdw %xmm1, %xmm0
 ; SSE-NEXT: movmskps %xmm0, %eax
-; SSE-NEXT: cmpb $15, %al
+; SSE-NEXT: cmpl $15, %eax
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -1983,7 +1983,7 @@
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpsllq $63, %ymm0, %ymm0
 ; AVX2-NEXT: vmovmskpd %ymm0, %eax
-; AVX2-NEXT: cmpb $15, %al
+; AVX2-NEXT: cmpl $15, %eax
 ; AVX2-NEXT: sete %al
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
@@ -2098,7 +2098,7 @@
 ; AVX2-NEXT: vpsllq $63, %ymm0, %ymm0
 ; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: vmovmskps %ymm0, %eax
-; AVX2-NEXT: cmpb $-1, %al
+; AVX2-NEXT: cmpl $255, %eax
 ; AVX2-NEXT: sete %al
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
@@ -2184,7 +2184,7 @@
 ; SSE: # %bb.0:
 ; SSE-NEXT: psllw $5, %xmm0
 ; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -2192,7 +2192,7 @@
 ; AVX1OR2: # %bb.0:
 ; AVX1OR2-NEXT: vpsllw $5, %xmm0, %xmm0
 ; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax
-; AVX1OR2-NEXT: cmpw $-1, %ax
+; AVX1OR2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1OR2-NEXT: sete %al
 ; AVX1OR2-NEXT: retq
 ;
@@ -2200,7 +2200,7 @@
 ; KNL: # %bb.0:
 ; KNL-NEXT: vpsllw $5, %xmm0, %xmm0
 ; KNL-NEXT: vpmovmskb %xmm0, %eax
-; KNL-NEXT: cmpw $-1, %ax
+; KNL-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; KNL-NEXT: sete %al
 ; KNL-NEXT: retq
 ;
@@ -2250,7 +2250,7 @@
 ; SSE-NEXT: pand %xmm1, %xmm0
 ; SSE-NEXT: psllw $5, %xmm0
 ; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -2260,7 +2260,7 @@
 ; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT: vpsllw $5, %xmm0, %xmm0
 ; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT: sete %al
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -2335,7 +2335,7 @@
 ; SSE-NEXT: pand %xmm0, %xmm1
 ; SSE-NEXT: psllw $5, %xmm1
 ; SSE-NEXT: pmovmskb %xmm1, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -2348,7 +2348,7 @@
 ; AVX1-NEXT: vpand %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT: vpsllw $5, %xmm0, %xmm0
 ; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT: sete %al
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -2517,7 +2517,7 @@
 ; SSE-NEXT: psllw $13, %xmm0
 ; SSE-NEXT: packsswb %xmm1, %xmm0
 ; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -2528,7 +2528,7 @@
 ; AVX1-NEXT: vpsllw $13, %xmm0, %xmm0
 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT: sete %al
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -2539,7 +2539,7 @@
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: cmpw $-1, %ax
+; AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX2-NEXT: sete %al
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
@@ -2580,7 +2580,7 @@
 ; SSE-NEXT: packsswb %xmm3, %xmm2
 ; SSE-NEXT: pand %xmm0, %xmm2
 ; SSE-NEXT: pmovmskb %xmm2, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -2596,7 +2596,7 @@
 ; AVX1-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT: sete %al
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -2731,7 +2731,7 @@
 ; SSE: # %bb.0:
 ; SSE-NEXT: pslld $29, %xmm0
 ; SSE-NEXT: movmskps %xmm0, %eax
-; SSE-NEXT: cmpb $15, %al
+; SSE-NEXT: cmpl $15, %eax
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -2739,7 +2739,7 @@
 ; AVX1OR2: # %bb.0:
 ; AVX1OR2-NEXT: vpslld $29, %xmm0, %xmm0
 ; AVX1OR2-NEXT: vmovmskps %xmm0, %eax
-; AVX1OR2-NEXT: cmpb $15, %al
+; AVX1OR2-NEXT: cmpl $15, %eax
 ; AVX1OR2-NEXT: sete %al
 ; AVX1OR2-NEXT: retq
 ;
@@ -2835,7 +2835,7 @@
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpslld $29, %ymm0, %ymm0
 ; AVX2-NEXT: vmovmskps %ymm0, %eax
-; AVX2-NEXT: cmpb $-1, %al
+; AVX2-NEXT: cmpl $255, %eax
 ; AVX2-NEXT: sete %al
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
@@ -2921,7 +2921,7 @@
 ; SSE-NEXT: packssdw %xmm1, %xmm0
 ; SSE-NEXT: packsswb %xmm2, %xmm0
 ; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -2937,7 +2937,7 @@
 ; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT: sete %al
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -3026,7 +3026,7 @@
 ; SSE: # %bb.0:
 ; SSE-NEXT: psllq $61, %xmm0
 ; SSE-NEXT: movmskpd %xmm0, %eax
-; SSE-NEXT: cmpb $3, %al
+; SSE-NEXT: cmpl $3, %eax
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -3034,7 +3034,7 @@
 ; AVX1OR2: # %bb.0:
 ; AVX1OR2-NEXT: vpsllq $61, %xmm0, %xmm0
 ; AVX1OR2-NEXT: vmovmskpd %xmm0, %eax
-; AVX1OR2-NEXT: cmpb $3, %al
+; AVX1OR2-NEXT: cmpl $3, %eax
 ; AVX1OR2-NEXT: sete %al
 ; AVX1OR2-NEXT: retq
 ;
@@ -3110,7 +3110,7 @@
 ; SSE-NEXT: psllq $61, %xmm0
 ; SSE-NEXT: packssdw %xmm1, %xmm0
 ; SSE-NEXT: movmskps %xmm0, %eax
-; SSE-NEXT: cmpb $15, %al
+; SSE-NEXT: cmpl $15, %eax
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -3129,7 +3129,7 @@
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpsllq $61, %ymm0, %ymm0
 ; AVX2-NEXT: vmovmskpd %ymm0, %eax
-; AVX2-NEXT: cmpb $15, %al
+; AVX2-NEXT: cmpl $15, %eax
 ; AVX2-NEXT: sete %al
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
@@ -3244,7 +3244,7 @@
 ; AVX2-NEXT: vpsllq $61, %ymm0, %ymm0
 ; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: vmovmskps %ymm0, %eax
-; AVX2-NEXT: cmpb $-1, %al
+; AVX2-NEXT: cmpl $255, %eax
 ; AVX2-NEXT: sete %al
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
@@ -3689,8 +3689,7 @@
 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
 ; SSE2-NEXT: pand %xmm0, %xmm1
 ; SSE2-NEXT: movmskpd %xmm1, %eax
-; SSE2-NEXT: xorl $3, %eax
-; SSE2-NEXT: cmpb $3, %al
+; SSE2-NEXT: testl %eax, %eax
 ; SSE2-NEXT: sete %al
 ; SSE2-NEXT: retq
 ;
@@ -3698,17 +3697,14 @@
 ; SSE41: # %bb.0:
 ; SSE41-NEXT: pcmpeqq %xmm1, %xmm0
 ; SSE41-NEXT: movmskpd %xmm0, %eax
-; SSE41-NEXT: xorl $3, %eax
-; SSE41-NEXT: cmpb $3, %al
+; SSE41-NEXT: testl %eax, %eax
 ; SSE41-NEXT: sete %al
 ; SSE41-NEXT: retq
 ;
 ; AVX1OR2-LABEL: movmsk_and_v2i64:
 ; AVX1OR2: # %bb.0:
 ; AVX1OR2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
-; AVX1OR2-NEXT: vmovmskpd %xmm0, %eax
-; AVX1OR2-NEXT: xorl $3, %eax
-; AVX1OR2-NEXT: cmpb $3, %al
+; AVX1OR2-NEXT: vtestpd %xmm0, %xmm0
 ; AVX1OR2-NEXT: sete %al
 ; AVX1OR2-NEXT: retq
 ;
@@ -3820,7 +3816,7 @@
 ; SSE: # %bb.0:
 ; SSE-NEXT: cmplepd %xmm0, %xmm1
 ; SSE-NEXT: movmskpd %xmm1, %eax
-; SSE-NEXT: cmpb $3, %al
+; SSE-NEXT: cmpl $3, %eax
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -3828,7 +3824,7 @@
 ; AVX1OR2: # %bb.0:
 ; AVX1OR2-NEXT: vcmplepd %xmm0, %xmm1, %xmm0
 ; AVX1OR2-NEXT: vmovmskpd %xmm0, %eax
-; AVX1OR2-NEXT: cmpb $3, %al
+; AVX1OR2-NEXT: cmpl $3, %eax
 ; AVX1OR2-NEXT: sete %al
 ; AVX1OR2-NEXT: retq
 ;
@@ -4248,7 +4244,7 @@
 ; SSE: # %bb.0:
 ; SSE-NEXT: cmpltpd %xmm0, %xmm1
 ; SSE-NEXT: movmskpd %xmm1, %eax
-; SSE-NEXT: cmpb $3, %al
+; SSE-NEXT: cmpl $3, %eax
 ; SSE-NEXT: movl $42, %ecx
 ; SSE-NEXT: movl $99, %eax
 ; SSE-NEXT: cmovel %ecx, %eax
@@ -4258,7 +4254,7 @@
 ; AVX1OR2: # %bb.0:
 ; AVX1OR2-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
 ; AVX1OR2-NEXT: vmovmskpd %xmm0, %eax
-; AVX1OR2-NEXT: cmpb $3, %al
+; AVX1OR2-NEXT: cmpl $3, %eax
 ; AVX1OR2-NEXT: movl $42, %ecx
 ; AVX1OR2-NEXT: movl $99, %eax
 ; AVX1OR2-NEXT: cmovel %ecx, %eax
diff --git a/llvm/test/CodeGen/X86/setcc-logic.ll b/llvm/test/CodeGen/X86/setcc-logic.ll
--- a/llvm/test/CodeGen/X86/setcc-logic.ll
+++ b/llvm/test/CodeGen/X86/setcc-logic.ll
@@ -324,7 +324,7 @@
 ; CHECK-NEXT: xorpd %xmm1, %xmm1
 ; CHECK-NEXT: cmpltpd %xmm0, %xmm1
 ; CHECK-NEXT: movmskpd %xmm1, %eax
-; CHECK-NEXT: cmpb $3, %al
+; CHECK-NEXT: cmpl $3, %eax
 ; CHECK-NEXT: jne .LBB16_2
 ; CHECK-NEXT: # %bb.1: # %true
 ; CHECK-NEXT: movl $42, %eax
diff --git a/llvm/test/CodeGen/X86/vector-compare-all_of.ll b/llvm/test/CodeGen/X86/vector-compare-all_of.ll
--- a/llvm/test/CodeGen/X86/vector-compare-all_of.ll
+++ b/llvm/test/CodeGen/X86/vector-compare-all_of.ll
@@ -934,7 +934,7 @@
 ; SSE: # %bb.0:
 ; SSE-NEXT: cmpltpd %xmm0, %xmm1
 ; SSE-NEXT: movmskpd %xmm1, %eax
-; SSE-NEXT: cmpb $3, %al
+; SSE-NEXT: cmpl $3, %eax
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -942,7 +942,7 @@
 ; AVX1OR2: # %bb.0:
 ; AVX1OR2-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
 ; AVX1OR2-NEXT: vmovmskpd %xmm0, %eax
-; AVX1OR2-NEXT: cmpb $3, %al
+; AVX1OR2-NEXT: cmpl $3, %eax
 ; AVX1OR2-NEXT: sete %al
 ; AVX1OR2-NEXT: retq
 ;
@@ -965,7 +965,7 @@
 ; SSE: # %bb.0:
 ; SSE-NEXT: cmpeqps %xmm1, %xmm0
 ; SSE-NEXT: movmskps %xmm0, %eax
-; SSE-NEXT: cmpb $15, %al
+; SSE-NEXT: cmpl $15, %eax
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -973,7 +973,7 @@
 ; AVX1OR2: # %bb.0:
 ; AVX1OR2-NEXT: vcmpeqps %xmm1, %xmm0, %xmm0
 ; AVX1OR2-NEXT: vmovmskps %xmm0, %eax
-; AVX1OR2-NEXT: cmpb $15, %al
+; AVX1OR2-NEXT: cmpl $15, %eax
 ; AVX1OR2-NEXT: sete %al
 ; AVX1OR2-NEXT: retq
 ;
@@ -1000,7 +1000,7 @@
 ; SSE-NEXT: cmplepd %xmm0, %xmm2
 ; SSE-NEXT: packssdw %xmm3, %xmm2
 ; SSE-NEXT: movmskps %xmm2, %eax
-; SSE-NEXT: cmpb $15, %al
+; SSE-NEXT: cmpl $15, %eax
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -1008,7 +1008,7 @@
 ; AVX1OR2: # %bb.0:
 ; AVX1OR2-NEXT: vcmplepd %ymm0, %ymm1, %ymm0
 ; AVX1OR2-NEXT: vmovmskpd %ymm0, %eax
-; AVX1OR2-NEXT: cmpb $15, %al
+; AVX1OR2-NEXT: cmpl $15, %eax
 ; AVX1OR2-NEXT: sete %al
 ; AVX1OR2-NEXT: vzeroupper
 ; AVX1OR2-NEXT: retq
@@ -1046,7 +1046,7 @@
 ; AVX1OR2: # %bb.0:
 ; AVX1OR2-NEXT: vcmpneqps %ymm1, %ymm0, %ymm0
 ; AVX1OR2-NEXT: vmovmskps %ymm0, %eax
-; AVX1OR2-NEXT: cmpb $-1, %al
+; AVX1OR2-NEXT: cmpl $255, %eax
 ; AVX1OR2-NEXT: sete %al
 ; AVX1OR2-NEXT: vzeroupper
 ; AVX1OR2-NEXT: retq
@@ -1077,8 +1077,7 @@
 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
 ; SSE2-NEXT: pand %xmm0, %xmm1
 ; SSE2-NEXT: movmskpd %xmm1, %eax
-; SSE2-NEXT: xorl $3, %eax
-; SSE2-NEXT: cmpb $3, %al
+; SSE2-NEXT: testl %eax, %eax
 ; SSE2-NEXT: sete %al
 ; SSE2-NEXT: retq
 ;
@@ -1086,17 +1085,14 @@
 ; SSE42: # %bb.0:
 ; SSE42-NEXT: pcmpeqq %xmm1, %xmm0
 ; SSE42-NEXT: movmskpd %xmm0, %eax
-; SSE42-NEXT: xorl $3, %eax
-; SSE42-NEXT: cmpb $3, %al
+; SSE42-NEXT: testl %eax, %eax
 ; SSE42-NEXT: sete %al
 ; SSE42-NEXT: retq
 ;
 ; AVX1OR2-LABEL: bool_reduction_v2i64:
 ; AVX1OR2: # %bb.0:
 ; AVX1OR2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
-; AVX1OR2-NEXT: vmovmskpd %xmm0, %eax
-; AVX1OR2-NEXT: xorl $3, %eax
-; AVX1OR2-NEXT: cmpb $3, %al
+; AVX1OR2-NEXT: vtestpd %xmm0, %xmm0
 ; AVX1OR2-NEXT: sete %al
 ; AVX1OR2-NEXT: retq
 ;
@@ -1122,7 +1118,7 @@
 ; SSE2-NEXT: pxor %xmm2, %xmm0
 ; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
 ; SSE2-NEXT: movmskps %xmm0, %eax
-; SSE2-NEXT: cmpb $15, %al
+; SSE2-NEXT: cmpl $15, %eax
 ; SSE2-NEXT: sete %al
 ; SSE2-NEXT: retq
 ;
@@ -1131,8 +1127,7 @@
 ; SSE42-NEXT: pminud %xmm0, %xmm1
 ; SSE42-NEXT: pcmpeqd %xmm0, %xmm1
 ; SSE42-NEXT: movmskps %xmm1, %eax
-; SSE42-NEXT: xorl $15, %eax
-; SSE42-NEXT: cmpb $15, %al
+; SSE42-NEXT: testl %eax, %eax
 ; SSE42-NEXT: sete %al
 ; SSE42-NEXT: retq
 ;
@@ -1140,9 +1135,7 @@
 ; AVX1OR2: # %bb.0:
 ; AVX1OR2-NEXT: vpminud %xmm1, %xmm0, %xmm1
 ; AVX1OR2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
-; AVX1OR2-NEXT: vmovmskps %xmm0, %eax
-; AVX1OR2-NEXT: xorl $15, %eax
-; AVX1OR2-NEXT: cmpb $15, %al
+; AVX1OR2-NEXT: vtestps %xmm0, %xmm0
 ; AVX1OR2-NEXT: sete %al
 ; AVX1OR2-NEXT: retq
 ;
@@ -1204,7 +1197,7 @@
 ; SSE: # %bb.0:
 ; SSE-NEXT: pcmpgtb %xmm1, %xmm0
 ; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT: sete %al
 ; SSE-NEXT: retq
 ;
@@ -1212,7 +1205,7 @@
 ; AVX1OR2: # %bb.0:
 ; AVX1OR2-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
 ; AVX1OR2-NEXT: vpmovmskb %xmm0, %eax
-; AVX1OR2-NEXT: cmpw $-1, %ax
+; AVX1OR2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1OR2-NEXT: sete %al
 ; AVX1OR2-NEXT: retq
 ;
@@ -1257,7 +1250,7 @@
 ; SSE2-NEXT: por %xmm2, %xmm0
 ; SSE2-NEXT: packssdw %xmm1, %xmm0
 ; SSE2-NEXT: movmskps %xmm0, %eax
-; SSE2-NEXT: cmpb $15, %al
+; SSE2-NEXT: cmpl $15, %eax
 ; SSE2-NEXT: sete %al
 ; SSE2-NEXT: retq
 ;
@@ -1267,7 +1260,7 @@
 ; SSE42-NEXT: pcmpgtq %xmm0, %xmm2
 ; SSE42-NEXT: packssdw %xmm3, %xmm2
 ; SSE42-NEXT: movmskps %xmm2, %eax
-; SSE42-NEXT: cmpb $15, %al
+; SSE42-NEXT: cmpl $15, %eax
 ; SSE42-NEXT: sete %al
 ; SSE42-NEXT: retq
 ;
@@ -1288,7 +1281,7 @@
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT: vmovmskpd %ymm0, %eax
-; AVX2-NEXT: cmpb $15, %al
+; AVX2-NEXT: cmpl $15, %eax
 ; AVX2-NEXT: sete %al
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-reduce-and-bool.ll b/llvm/test/CodeGen/X86/vector-reduce-and-bool.ll
--- a/llvm/test/CodeGen/X86/vector-reduce-and-bool.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-and-bool.ll
@@ -16,7 +16,7 @@
 ; SSE2: # %bb.0:
 ; SSE2-NEXT: psllq $63, %xmm0
 ; SSE2-NEXT: movmskpd %xmm0, %eax
-; SSE2-NEXT: cmpb $3, %al
+; SSE2-NEXT: cmpl $3, %eax
 ; SSE2-NEXT: sete %al
 ; SSE2-NEXT: retq
 ;
@@ -151,7 +151,7 @@
 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; SSE2-NEXT: pslld $31, %xmm0
 ; SSE2-NEXT: movmskps %xmm0, %eax
-; SSE2-NEXT: cmpb $15, %al
+; SSE2-NEXT: cmpl $15, %eax
 ; SSE2-NEXT: sete %al
 ; SSE2-NEXT: retq
 ;
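Note: at the IR level, the widened fold in the X86ISelLowering.cpp hunk behaves roughly like the rewrite sketched below. This is a hypothetical before/after pair for illustration only; the function names @src and @tgt are not part of the patch. Because the masked value has its upper bits known zero (MaskedValueIsZero), an equality compare against a truncated value can instead be performed at the legal source width, with the RHS constant zero-extended via zextOrTrunc:

  ; before: the compare happens on the truncated i16 value
  define i1 @src(i32 %x) {
    %m = and i32 %x, 65535   ; upper 16 bits of %m are known zero
    %t = trunc i32 %m to i16
    %c = icmp eq i16 %t, 7
    ret i1 %c
  }

  ; after the combine: same result, but the compare stays at i32 width
  define i1 @tgt(i32 %x) {
    %m = and i32 %x, 65535
    %c = icmp eq i32 %m, 7   ; RHS constant 7 zero-extended from i16
    ret i1 %c
  }

This is the pattern behind the test updates above: pmovmskb/movmskps/movmskpd already zero the upper bits of %eax, so an all-ones mask check no longer has to drop to a subregister compare (cmpw $-1, %ax or cmpb $15, %al) and can compare the full 32-bit register instead (cmpl $65535, %eax or cmpl $15, %eax).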