Index: include/llvm/IR/IntrinsicsX86.td
===================================================================
--- include/llvm/IR/IntrinsicsX86.td
+++ include/llvm/IR/IntrinsicsX86.td
@@ -264,12 +264,6 @@
       Intrinsic<[], [llvm_ptr_ty], []>;
 }
 
-// Misc.
-let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
-  def int_x86_sse_movmsk_ps : GCCBuiltin<"__builtin_ia32_movmskps">,
-      Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
-}
-
 //===----------------------------------------------------------------------===//
 // SSE2
 
@@ -490,10 +484,6 @@
   def int_x86_sse2_packuswb_128 : GCCBuiltin<"__builtin_ia32_packuswb128">,
       Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty,
                  llvm_v8i16_ty], [IntrNoMem]>;
-  def int_x86_sse2_movmsk_pd : GCCBuiltin<"__builtin_ia32_movmskpd">,
-      Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
-  def int_x86_sse2_pmovmskb_128 : GCCBuiltin<"__builtin_ia32_pmovmskb128">,
-      Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
   def int_x86_sse2_maskmov_dqu : GCCBuiltin<"__builtin_ia32_maskmovdqu">,
       Intrinsic<[], [llvm_v16i8_ty,
                  llvm_v16i8_ty, llvm_ptr_ty], []>;
@@ -1466,14 +1456,6 @@
                         [IntrNoMem]>;
 }
 
-// Vector extract sign mask
-let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
-  def int_x86_avx_movmsk_pd_256 : GCCBuiltin<"__builtin_ia32_movmskpd256">,
-      Intrinsic<[llvm_i32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
-  def int_x86_avx_movmsk_ps_256 : GCCBuiltin<"__builtin_ia32_movmskps256">,
-      Intrinsic<[llvm_i32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
-}
-
 // Vector zero
 let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
   def int_x86_avx_vzeroall : GCCBuiltin<"__builtin_ia32_vzeroall">,
@@ -2075,8 +2057,6 @@
 
 // Misc.
 let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
-  def int_x86_avx2_pmovmskb : GCCBuiltin<"__builtin_ia32_pmovmskb256">,
-      Intrinsic<[llvm_i32_ty], [llvm_v32i8_ty], [IntrNoMem]>;
   def int_x86_avx2_pshuf_b : GCCBuiltin<"__builtin_ia32_pshufb256">,
       Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
                  llvm_v32i8_ty], [IntrNoMem]>;
Index: lib/IR/AutoUpgrade.cpp
===================================================================
--- lib/IR/AutoUpgrade.cpp
+++ lib/IR/AutoUpgrade.cpp
@@ -203,6 +203,12 @@
          Name.startswith("sse41.pmovzx") || // Added in 3.9
          Name.startswith("avx2.pmovsx") || // Added in 3.9
          Name.startswith("avx2.pmovzx") || // Added in 3.9
+         Name.startswith("sse.movmsk.ps") || // Added in 6.0
+         Name.startswith("sse2.movmsk.pd") || // Added in 6.0
+         Name.startswith("sse2.pmovmskb.128") || // Added in 6.0
+         Name.startswith("avx.movmsk.pd.256") || // Added in 6.0
+         Name.startswith("avx.movmsk.ps.256") || // Added in 6.0
+         Name.startswith("avx2.pmovmskb") || // Added in 6.0
          Name.startswith("avx512.mask.pmovsx") || // Added in 4.0
          Name.startswith("avx512.mask.pmovzx") || // Added in 4.0
          Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0
@@ -910,6 +916,29 @@
   return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
 }
 
+// Emit the sign-mask (movmsk) pattern for an integer vector: test each
+// element's sign bit with a signed compare against zero, bitcast the
+// <N x i1> result to iN, and zero-extend to i32 unless the element count
+// already fills 32 bits.
+static Value *EmitX86Mask(IRBuilder<> &Builder, ArrayRef<Value *> Ops) {
+  Type *Typ1 = Ops[0]->getType();
+  Value *Cmp = Builder.CreateICmp(CmpInst::Predicate::ICMP_SLT, Ops[0],
+                                  ConstantVector::getNullValue(Typ1));
+  Value *BitCast = Builder.CreateBitCast(
+      Cmp, Type::getIntNTy(Builder.getContext(), Typ1->getVectorNumElements()));
+  return (Typ1->getVectorNumElements() < 32)
+             ? Builder.CreateZExt(BitCast,
+                                  Type::getInt32Ty(Builder.getContext()))
+             : BitCast;
+}
+
+/// Bitcast a floating-point vector (32- or 64-bit elements) to the
+/// corresponding integer vector, then emit the sign-mask pattern for it.
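+///
+/// For example, upgrading
+///   %r = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a)
+/// emits the following IR (see the updated x86-movmsk.ll tests below):
+///   %cast = bitcast <4 x float> %a to <4 x i32>
+///   %cmp = icmp slt <4 x i32> %cast, zeroinitializer
+///   %bc = bitcast <4 x i1> %cmp to i4
+///   %r = zext i4 %bc to i32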
+static Value *EmitX86MaskFloat(IRBuilder<> &Builder, ArrayRef<Value *> Ops) {
+  unsigned DstTypEle = Ops[0]->getType()->getVectorNumElements();
+  Type *New = Ops[0]->getType()->getScalarSizeInBits() == 32
+                  ? Type::getInt32Ty(Builder.getContext())
+                  : Type::getInt64Ty(Builder.getContext());
+  Type *DstTyp = VectorType::get(New, DstTypEle);
+  Value *BitCastFloatToInt = Builder.CreateBitCast(Ops[0], DstTyp);
+  return EmitX86Mask(Builder, BitCastFloatToInt);
+}
+
 /// Upgrade a call to an old intrinsic. All argument and return casting must be
 /// provided to seamlessly integrate with existing context.
 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
@@ -1278,6 +1307,14 @@
     if (CI->getNumArgOperands() == 3)
       Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                           CI->getArgOperand(1));
+  } else if (IsX86 && (Name.startswith("avx2.pmovmskb") ||
+                       Name.startswith("sse2.pmovmskb.128"))) {
+    Rep = EmitX86Mask(Builder, CI->getOperand(0));
+  } else if (IsX86 && (Name.startswith("sse.movmsk.ps") ||
+                       Name.startswith("sse2.movmsk.pd") ||
+                       Name.startswith("avx.movmsk.pd.256") ||
+                       Name.startswith("avx.movmsk.ps.256"))) {
+    Rep = EmitX86MaskFloat(Builder, CI->getOperand(0));
   } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") ||
                        Name == "avx2.vbroadcasti128")) {
     // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -30124,6 +30124,18 @@
                                 DAG.getBitcast(MVT::v2i64, Res));
   }
 
+  // Combine (bitcast (setcc Vec, all-zeros Vec, setlt) to i32/i64) into
+  // (X86ISD::MOVMSK Vec).
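+  // e.g. (i32 (bitcast (v32i1 setcc (v32i8 V), all-zeros, setlt)))
+  //        -> (i32 (X86ISD::MOVMSK V))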
+  if (N0->getOpcode() == ISD::SETCC && !VT.isFloatingPoint() &&
+      !VT.isVector()) {
+    ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
+    SDValue LHS = N0.getOperand(0);
+    SDValue RHS = N0.getOperand(1);
+    if (!LHS.isUndef() && isNullConstant(RHS.getOperand(0)) &&
+        CC == ISD::CondCode::SETLT)
+      return DAG.getNode(X86ISD::MOVMSK, SDLoc(N0), VT, LHS);
+  }
+
   // Convert a bitcasted integer logic operation that has one bitcasted
   // floating-point operand into a floating-point logic operation. This may
   // create a load of a constant, but that is cheaper than materializing the
@@ -30159,7 +30171,7 @@
     SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
     return DAG.getNode(FPOpcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
   }
-  
+
   return SDValue();
 }
 
@@ -35693,6 +35705,26 @@
   SDValue N0 = N->getOperand(0);
   EVT VT = N->getValueType(0);
 
+  // Combine (i32 zext (bitcast (setcc (bitcast vec1), vec2))) => (MOVMSK vec1)
+  // Combine (i64 zext (bitcast (setcc (bitcast vec1), vec2))) =>
+  //         (i64 zext (MOVMSK vec1))
+  if ((VT == MVT::i32 || VT == MVT::i64) && N0.getOpcode() == ISD::BITCAST &&
+      N0.getOperand(0).getOpcode() == ISD::SETCC) {
+    SDValue N00 = N0.getOperand(0);
+    if (N00.getOperand(0).getOpcode() == ISD::BITCAST &&
+        N00.getOperand(0).getOperand(0).getValueType().isFloatingPoint()) {
+      SDValue MaskI32 =
+          DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32,
+                      N0.getOperand(0).getOperand(0).getOperand(0));
+      return (VT == MVT::i32)
+                 ? MaskI32
+                 : DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, MaskI32);
+    }
+    if (VT == MVT::i32 &&
+        N00.getOperand(0).getValueType().getScalarSizeInBits() == 8)
+      return DAG.getNode(X86ISD::MOVMSK, dl, VT, N00.getOperand(0));
+  }
+
   if (N0.getOpcode() == ISD::AND &&
       N0.hasOneUse() &&
       N0.getOperand(0).hasOneUse()) {
@@ -35740,7 +35772,7 @@
   if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
     return R;
-  
+
   return SDValue();
 }
 
@@ -36718,6 +36750,26 @@
   return SDValue();
 }
 
+// Combine (X86ISD::MOVMSK (sign_extend (setcc Vec1, all-zeros, setlt))) =>
+// (X86ISD::MOVMSK Vec1)
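+//
+// The compare-and-extend pair is redundant here: a lane of
+// (setcc X, zero, setlt) is true exactly when the sign bit of X is set, and
+// MOVMSK reads only the sign bits, so MOVMSK of the extended mask equals
+// MOVMSK of X itself.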
+static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
+                             const X86Subtarget &Subtarget) {
+  MVT VT = N->getSimpleValueType(0);
+  SDLoc DL(N);
+
+  if (N->getOperand(0)->getOpcode() == ISD::SIGN_EXTEND) {
+    SDValue N0 = N->getOperand(0);
+    if (N0.getOperand(0).getOpcode() == ISD::SETCC) {
+      SDValue N00 = N0.getOperand(0);
+      SDValue LHS = N00.getOperand(0);
+      SDValue RHS = N00.getOperand(1);
+      ISD::CondCode CC = cast<CondCodeSDNode>(N00.getOperand(2))->get();
+      if (isNullConstant(RHS.getOperand(0)) && CC == ISD::SETLT)
+        return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, LHS);
+    }
+  }
+  return SDValue();
+}
+
 static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
@@ -37023,6 +37075,7 @@
   case X86ISD::TESTM:       return combineTestM(N, DAG, Subtarget);
   case X86ISD::PCMPEQ:
   case X86ISD::PCMPGT:      return combineVectorCompare(N, DAG, Subtarget);
+  case X86ISD::MOVMSK:      return combineMOVMSK(N, DAG, Subtarget);
   }
 
   return SDValue();
Index: lib/Target/X86/X86IntrinsicsInfo.h
===================================================================
--- lib/Target/X86/X86IntrinsicsInfo.h
+++ lib/Target/X86/X86IntrinsicsInfo.h
@@ -385,8 +385,6 @@
   X86_INTRINSIC_DATA(avx_max_ps_256,    INTR_TYPE_2OP, X86ISD::FMAX, 0),
   X86_INTRINSIC_DATA(avx_min_pd_256,    INTR_TYPE_2OP, X86ISD::FMIN, 0),
   X86_INTRINSIC_DATA(avx_min_ps_256,    INTR_TYPE_2OP, X86ISD::FMIN, 0),
-  X86_INTRINSIC_DATA(avx_movmsk_pd_256, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
-  X86_INTRINSIC_DATA(avx_movmsk_ps_256, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
   X86_INTRINSIC_DATA(avx_rcp_ps_256,    INTR_TYPE_1OP, X86ISD::FRCP, 0),
   X86_INTRINSIC_DATA(avx_round_pd_256,  ROUNDP, X86ISD::VRNDSCALE, 0),
   X86_INTRINSIC_DATA(avx_round_ps_256,  ROUNDP, X86ISD::VRNDSCALE, 0),
@@ -411,7 +409,6 @@
   X86_INTRINSIC_DATA(avx2_phsub_w,      INTR_TYPE_2OP, X86ISD::HSUB, 0),
   X86_INTRINSIC_DATA(avx2_pmadd_ub_sw,  INTR_TYPE_2OP, X86ISD::VPMADDUBSW, 0),
   X86_INTRINSIC_DATA(avx2_pmadd_wd,     INTR_TYPE_2OP, X86ISD::VPMADDWD, 0),
-  X86_INTRINSIC_DATA(avx2_pmovmskb,     INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
   X86_INTRINSIC_DATA(avx2_pmul_dq,      INTR_TYPE_2OP, X86ISD::PMULDQ, 0),
   X86_INTRINSIC_DATA(avx2_pmul_hr_sw,   INTR_TYPE_2OP, X86ISD::MULHRS, 0),
   X86_INTRINSIC_DATA(avx2_pmulh_w,      INTR_TYPE_2OP, ISD::MULHS, 0),
@@ -1597,7 +1594,6 @@
   X86_INTRINSIC_DATA(sse_max_ss,        INTR_TYPE_2OP, X86ISD::FMAXS, 0),
   X86_INTRINSIC_DATA(sse_min_ps,        INTR_TYPE_2OP, X86ISD::FMIN, 0),
   X86_INTRINSIC_DATA(sse_min_ss,        INTR_TYPE_2OP, X86ISD::FMINS, 0),
-  X86_INTRINSIC_DATA(sse_movmsk_ps,     INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
   X86_INTRINSIC_DATA(sse_rcp_ps,        INTR_TYPE_1OP, X86ISD::FRCP, 0),
   X86_INTRINSIC_DATA(sse_rsqrt_ps,      INTR_TYPE_1OP, X86ISD::FRSQRT, 0),
   X86_INTRINSIC_DATA(sse_sqrt_ps,       INTR_TYPE_1OP, ISD::FSQRT, 0),
@@ -1623,7 +1619,6 @@
   X86_INTRINSIC_DATA(sse2_max_sd,       INTR_TYPE_2OP, X86ISD::FMAXS, 0),
   X86_INTRINSIC_DATA(sse2_min_pd,       INTR_TYPE_2OP, X86ISD::FMIN, 0),
   X86_INTRINSIC_DATA(sse2_min_sd,       INTR_TYPE_2OP, X86ISD::FMINS, 0),
-  X86_INTRINSIC_DATA(sse2_movmsk_pd,    INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
   X86_INTRINSIC_DATA(sse2_packssdw_128, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
   X86_INTRINSIC_DATA(sse2_packsswb_128, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
   X86_INTRINSIC_DATA(sse2_packuswb_128, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
@@ -1632,7 +1627,6 @@
   X86_INTRINSIC_DATA(sse2_paddus_b,     INTR_TYPE_2OP, X86ISD::ADDUS, 0),
   X86_INTRINSIC_DATA(sse2_paddus_w,     INTR_TYPE_2OP, X86ISD::ADDUS, 0),
   X86_INTRINSIC_DATA(sse2_pmadd_wd,     INTR_TYPE_2OP, X86ISD::VPMADDWD, 0),
-  X86_INTRINSIC_DATA(sse2_pmovmskb_128, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
   X86_INTRINSIC_DATA(sse2_pmulh_w,      INTR_TYPE_2OP, ISD::MULHS, 0),
   X86_INTRINSIC_DATA(sse2_pmulhu_w,     INTR_TYPE_2OP, ISD::MULHU, 0),
   X86_INTRINSIC_DATA(sse2_pmulu_dq,     INTR_TYPE_2OP, X86ISD::PMULUDQ, 0),
Index: lib/Transforms/InstCombine/InstCombineCalls.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -2319,12 +2319,6 @@
   }
 
   case Intrinsic::x86_mmx_pmovmskb:
-  case Intrinsic::x86_sse_movmsk_ps:
-  case Intrinsic::x86_sse2_movmsk_pd:
-  case Intrinsic::x86_sse2_pmovmskb_128:
-  case Intrinsic::x86_avx_movmsk_pd_256:
-  case Intrinsic::x86_avx_movmsk_ps_256:
-  case Intrinsic::x86_avx2_pmovmskb:
     if (Value *V = simplifyX86movmsk(*II))
       return replaceInstUsesWith(*II, V);
     break;
Index: lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -638,12 +638,7 @@
     break;
   }
   case Intrinsic::x86_mmx_pmovmskb:
-  case Intrinsic::x86_sse_movmsk_ps:
-  case Intrinsic::x86_sse2_movmsk_pd:
-  case Intrinsic::x86_sse2_pmovmskb_128:
-  case Intrinsic::x86_avx_movmsk_ps_256:
-  case Intrinsic::x86_avx_movmsk_pd_256:
-  case Intrinsic::x86_avx2_pmovmskb: {
+  {
     // MOVMSK copies the vector elements' sign bits to the low bits
    // and zeros the high bits.
     unsigned ArgWidth;
Index: test/CodeGen/X86/avx-intrinsics-fast-isel.ll
===================================================================
--- test/CodeGen/X86/avx-intrinsics-fast-isel.ll
+++ test/CodeGen/X86/avx-intrinsics-fast-isel.ll
@@ -1539,13 +1539,25 @@
 define i32 @test_mm256_movemask_ps(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm256_movemask_ps:
 ; X32:       # BB#0:
+; X32-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X32-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X32-NEXT:    vpcmpgtd %xmm1, %xmm2, %xmm1
+; X32-NEXT:    vpcmpgtd %xmm0, %xmm2, %xmm0
+; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X32-NEXT:    vmovmskps %ymm0, %eax
+; X32-NEXT:    movzbl %al, %eax
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_movemask_ps:
 ; X64:       # BB#0:
+; X64-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X64-NEXT:    vpcmpgtd %xmm1, %xmm2, %xmm1
+; X64-NEXT:    vpcmpgtd %xmm0, %xmm2, %xmm0
+; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X64-NEXT:    vmovmskps %ymm0, %eax
+; X64-NEXT:    movzbl %al, %eax
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
   %res = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0)
Index: test/CodeGen/X86/broadcastm-lowering.ll
===================================================================
--- test/CodeGen/X86/broadcastm-lowering.ll
+++ test/CodeGen/X86/broadcastm-lowering.ll
@@ -43,29 +43,21 @@
 define <4 x i32> @test_mm_epi32(<16 x i8> %a, <16 x i8> %b) {
 ; AVX512CD-LABEL: test_mm_epi32:
 ; AVX512CD:       # BB#0: # %entry
-; AVX512CD-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
-; AVX512CD-NEXT:    vpmovsxbd %xmm0, %zmm0
-; AVX512CD-NEXT:    vpslld $31, %zmm0, %zmm0
-; AVX512CD-NEXT:    vptestmd %zmm0, %zmm0, %k0
-; AVX512CD-NEXT:    kmovw %k0, %eax
-; AVX512CD-NEXT:    vpxor %xmm0, %xmm0, %xmm0
-; AVX512CD-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; AVX512CD-NEXT:    vpinsrw $2, %eax, %xmm0, %xmm0
-; AVX512CD-NEXT:    vpinsrw $4, %eax, %xmm0, %xmm0
-; AVX512CD-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX512CD-NEXT:    vzeroupper
+; AVX512CD-NEXT:    vpmovmskb %xmm0, %eax
+; AVX512CD-NEXT:    vmovd %eax, %xmm0
+; AVX512CD-NEXT:    vpbroadcastd %xmm0, %xmm0
 ; AVX512CD-NEXT:    retq
 ;
 ; AVX512VLCDBW-LABEL: test_mm_epi32:
 ; AVX512VLCDBW:       # BB#0: # %entry
-; AVX512VLCDBW-NEXT:    vpcmpeqb %xmm1, %xmm0, %k0
-; AVX512VLCDBW-NEXT:    vpbroadcastmw2d %k0, %xmm0
+; AVX512VLCDBW-NEXT:    vpmovmskb %xmm0, %eax
+; AVX512VLCDBW-NEXT:    vpbroadcastd %eax, %xmm0
 ; AVX512VLCDBW-NEXT:    retq
 ;
 ; X86-AVX512VLCDBW-LABEL: test_mm_epi32:
 ; X86-AVX512VLCDBW:       # BB#0: # %entry
-; X86-AVX512VLCDBW-NEXT:    vpcmpeqb %xmm1, %xmm0, %k0
-; X86-AVX512VLCDBW-NEXT:    vpbroadcastmw2d %k0, %xmm0
+; X86-AVX512VLCDBW-NEXT:    vpmovmskb %xmm0, %eax
+; X86-AVX512VLCDBW-NEXT:    vpbroadcastd %eax, %xmm0
 ; X86-AVX512VLCDBW-NEXT:    retl
 entry:
   %0 = icmp eq <16 x i8> %a, %b
Index: test/CodeGen/X86/movmsk.ll
===================================================================
--- test/CodeGen/X86/movmsk.ll
+++ test/CodeGen/X86/movmsk.ll
@@ -133,7 +133,7 @@
 define i32 @t2(<4 x float> %x, i32* nocapture %indexTable) nounwind uwtable readonly ssp {
 ; CHECK-LABEL: t2:
 ; CHECK:       ## BB#0: ## %entry
-; CHECK-NEXT:    movmskpd %xmm0, %eax
+; CHECK-NEXT:    movmskps %xmm0, %eax
 ; CHECK-NEXT:    movl (%rdi,%rax,4), %eax
 ; CHECK-NEXT:    retq
 entry:
Index: test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
===================================================================
--- test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
+++ test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
@@ -1826,11 +1826,13 @@
 ; X32-LABEL: test_mm_movemask_epi8:
 ; X32:       # BB#0:
 ; X32-NEXT:    pmovmskb %xmm0, %eax
+; X32-NEXT:    movzwl %ax, %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_movemask_epi8:
 ; X64:       # BB#0:
 ; X64-NEXT:    pmovmskb %xmm0, %eax
+; X64-NEXT:    movzwl %ax, %eax
 ; X64-NEXT:    retq
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %res = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %arg0)
Index: test/Transforms/InstCombine/X86/x86-movmsk.ll
===================================================================
--- test/Transforms/InstCombine/X86/x86-movmsk.ll
+++ test/Transforms/InstCombine/X86/x86-movmsk.ll
@@ -9,7 +9,7 @@
 
 define i32 @test_upper_x86_mmx_pmovmskb(x86_mmx %a0) {
 ; CHECK-LABEL: @test_upper_x86_mmx_pmovmskb(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.x86.mmx.pmovmskb(x86_mmx %a0)
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.x86.mmx.pmovmskb(x86_mmx [[A0:%.*]])
 ; CHECK-NEXT:    ret i32 [[TMP1]]
 ;
   %1 = call i32 @llvm.x86.mmx.pmovmskb(x86_mmx %a0)
@@ -19,8 +19,11 @@
 
 define i32 @test_upper_x86_sse_movmsk_ps(<4 x float> %a0) {
 ; CHECK-LABEL: @test_upper_x86_sse_movmsk_ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0)
-; CHECK-NEXT:    ret i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x float> [[A0:%.*]] to <4 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt <4 x i32> [[TMP1]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i1> [[TMP2]] to i4
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i4 [[TMP3]] to i32
+; CHECK-NEXT:    ret i32 [[TMP4]]
 ;
   %1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0)
   %2 = and i32 %1, 15
@@ -29,8 +32,11 @@
 
 define i32 @test_upper_x86_sse2_movmsk_pd(<2 x double> %a0) {
 ; CHECK-LABEL: @test_upper_x86_sse2_movmsk_pd(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0)
-; CHECK-NEXT:    ret i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x double> [[A0:%.*]] to <2 x i64>
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt <2 x i64> [[TMP1]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i1> [[TMP2]] to i2
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i2 [[TMP3]] to i32
+; CHECK-NEXT:    ret i32 [[TMP4]]
 ;
   %1 = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0)
   %2 = and i32 %1, 3
@@ -39,8 +45,10 @@
 
 define i32 @test_upper_x86_sse2_pmovmskb_128(<16 x i8> %a0) {
 ; CHECK-LABEL: @test_upper_x86_sse2_pmovmskb_128(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0)
-; CHECK-NEXT:    ret i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt <16 x i8> [[A0:%.*]], zeroinitializer
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i1> [[TMP1]] to i16
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i16 [[TMP2]] to i32
+; CHECK-NEXT:    ret i32 [[TMP3]]
 ;
   %1 = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0)
   %2 = and i32 %1, 65535
@@ -49,8 +57,11 @@
 
 define i32 @test_upper_x86_avx_movmsk_ps_256(<8 x float> %a0) {
 ; CHECK-LABEL: @test_upper_x86_avx_movmsk_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0)
-; CHECK-NEXT:    ret i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x float> [[A0:%.*]] to <8 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt <8 x i32> [[TMP1]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i1> [[TMP2]] to i8
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
+; CHECK-NEXT:    ret i32 [[TMP4]]
 ;
   %1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0)
   %2 = and i32 %1, 255
@@ -59,8 +70,11 @@
 
 define i32 @test_upper_x86_avx_movmsk_pd_256(<4 x double> %a0) {
 ; CHECK-LABEL: @test_upper_x86_avx_movmsk_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0)
-; CHECK-NEXT:    ret i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x double> [[A0:%.*]] to <4 x i64>
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt <4 x i64> [[TMP1]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i1> [[TMP2]] to i4
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i4 [[TMP3]] to i32
+; CHECK-NEXT:    ret i32 [[TMP4]]
 ;
   %1 = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0)
   %2 = and i32 %1, 15