Index: test/CodeGen/X86/sse-intrinsics-fast-isel.ll
===================================================================
--- test/CodeGen/X86/sse-intrinsics-fast-isel.ll
+++ test/CodeGen/X86/sse-intrinsics-fast-isel.ll
@@ -196,10 +196,11 @@
 ; X64: # BB#0:
 ; X64-NEXT: cmpeqps %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 0)
+  %cmp = fcmp oeq <4 x float> %a0, %a1
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %res = bitcast <4 x i32> %sext to <4 x float>
   ret <4 x float> %res
 }
-declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
 
 define <4 x float> @test_mm_cmpeq_ss(<4 x float> %a0, <4 x float> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpeq_ss:
@@ -228,7 +229,9 @@
 ; X64-NEXT: cmpleps %xmm0, %xmm1
 ; X64-NEXT: movaps %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a1, <4 x float> %a0, i8 2)
+  %cmp = fcmp ole <4 x float> %a1, %a0
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %res = bitcast <4 x i32> %sext to <4 x float>
   ret <4 x float> %res
 }
 
@@ -261,7 +264,9 @@
 ; X64-NEXT: cmpltps %xmm0, %xmm1
 ; X64-NEXT: movaps %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a1, <4 x float> %a0, i8 1)
+  %cmp = fcmp olt <4 x float> %a1, %a0
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %res = bitcast <4 x i32> %sext to <4 x float>
   ret <4 x float> %res
 }
 
@@ -292,7 +297,9 @@
 ; X64: # BB#0:
 ; X64-NEXT: cmpleps %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 2)
+  %cmp = fcmp ole <4 x float> %a0, %a1
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %res = bitcast <4 x i32> %sext to <4 x float>
   ret <4 x float> %res
 }
 
@@ -320,7 +327,9 @@
 ; X64: # BB#0:
 ; X64-NEXT: cmpltps %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 1)
+  %cmp = fcmp olt <4 x float> %a0, %a1
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %res = bitcast <4 x i32> %sext to <4 x float>
   ret <4 x float> %res
 }
 
@@ -348,7 +357,9 @@
 ; X64: # BB#0:
 ; X64-NEXT: cmpneqps %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 4)
+  %cmp = fcmp une <4 x float> %a0, %a1
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %res = bitcast <4 x i32> %sext to <4 x float>
   ret <4 x float> %res
 }
 
@@ -378,7 +389,9 @@
 ; X64-NEXT: cmpnleps %xmm0, %xmm1
 ; X64-NEXT: movaps %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a1, <4 x float> %a0, i8 6)
+  %cmp = fcmp ugt <4 x float> %a1, %a0
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %res = bitcast <4 x i32> %sext to <4 x float>
   ret <4 x float> %res
 }
 
@@ -411,7 +424,9 @@
 ; X64-NEXT: cmpnltps %xmm0, %xmm1
 ; X64-NEXT: movaps %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a1, <4 x float> %a0, i8 5)
+  %cmp = fcmp uge <4 x float> %a1, %a0
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %res = bitcast <4 x i32> %sext to <4 x float>
   ret <4 x float> %res
 }
 
@@ -442,7 +457,9 @@
 ; X64: # BB#0:
 ; X64-NEXT: cmpnleps %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 6)
+  %cmp = fcmp ugt <4 x float> %a0, %a1
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %res = bitcast <4 x i32> %sext to <4 x float>
   ret <4 x float> %res
 }
 
@@ -470,7 +487,9 @@
 ; X64: # BB#0:
 ; X64-NEXT: cmpnltps %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 5)
+  %cmp = fcmp uge <4 x float> %a0, %a1
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %res = bitcast <4 x i32> %sext to <4 x float>
   ret <4 x float> %res
 }
 
@@ -498,7 +517,9 @@
 ; X64: # BB#0:
 ; X64-NEXT: cmpordps %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 7)
+  %cmp = fcmp ord <4 x float> %a0, %a1
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %res = bitcast <4 x i32> %sext to <4 x float>
   ret <4 x float> %res
 }
 
@@ -526,7 +547,9 @@
 ; X64: # BB#0:
 ; X64-NEXT: cmpunordps %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 3)
+  %cmp = fcmp uno <4 x float> %a0, %a1
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %res = bitcast <4 x i32> %sext to <4 x float>
   ret <4 x float> %res
 }
 
Index: test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
===================================================================
--- test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
+++ test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
@@ -460,10 +460,11 @@
 ; X64: # BB#0:
 ; X64-NEXT: cmpeqpd %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 0)
+  %fcmp = fcmp oeq <2 x double> %a0, %a1
+  %sext = sext <2 x i1> %fcmp to <2 x i64>
+  %res = bitcast <2 x i64> %sext to <2 x double>
   ret <2 x double> %res
 }
-declare <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double>, <2 x double>, i8) nounwind readnone
 
 define <2 x double> @test_mm_cmpeq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
 ; X32-LABEL: test_mm_cmpeq_sd:
@@ -492,7 +493,9 @@
 ; X64-NEXT: cmplepd %xmm0, %xmm1
 ; X64-NEXT: movapd %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a1, <2 x double> %a0, i8 2)
+  %fcmp = fcmp ole <2 x double> %a1, %a0
+  %sext = sext <2 x i1> %fcmp to <2 x i64>
+  %res = bitcast <2 x i64> %sext to <2 x double>
   ret <2 x double> %res
 }
 
@@ -582,7 +585,9 @@
 ; X64-NEXT: cmpltpd %xmm0, %xmm1
 ; X64-NEXT: movapd %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a1, <2 x double> %a0, i8 1)
+  %fcmp = fcmp olt <2 x double> %a1, %a0
+  %sext = sext <2 x i1> %fcmp to <2 x i64>
+  %res = bitcast <2 x i64> %sext to <2 x double>
   ret <2 x double> %res
 }
 
@@ -616,7 +621,9 @@
 ; X64: # BB#0:
 ; X64-NEXT: cmplepd %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 2)
+  %fcmp = fcmp ole <2 x double> %a0, %a1
+  %sext = sext <2 x i1> %fcmp to <2 x i64>
+  %res = bitcast <2 x i64> %sext to <2 x double>
   ret <2 x double> %res
 }
 
@@ -704,7 +711,9 @@
 ; X64: # BB#0:
 ; X64-NEXT: cmpltpd %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 1)
+  %fcmp = fcmp olt <2 x double> %a0, %a1
+  %sext = sext <2 x i1> %fcmp to <2 x i64>
+  %res = bitcast <2 x i64> %sext to <2 x double>
   ret <2 x double> %res
 }
 
@@ -732,7 +741,9 @@
 ; X64: # BB#0:
 ; X64-NEXT: cmpneqpd %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 4)
+  %fcmp = fcmp une <2 x double> %a0, %a1
+  %sext = sext <2 x i1> %fcmp to <2 x i64>
+  %res = bitcast <2 x i64> %sext to <2 x double>
   ret <2 x double> %res
 }
 
@@ -762,7 +773,9 @@
 ; X64-NEXT: cmpnlepd %xmm0, %xmm1
 ; X64-NEXT: movapd %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a1, <2 x double> %a0, i8 6)
+  %fcmp = fcmp ugt <2 x double> %a1, %a0
+  %sext = sext <2 x i1> %fcmp to <2 x i64>
+  %res = bitcast <2 x i64> %sext to <2 x double>
   ret <2 x double> %res
 }
 
@@ -798,7 +811,9 @@
 ; X64-NEXT: cmpnltpd %xmm0, %xmm1
 ; X64-NEXT: movapd %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a1, <2 x double> %a0, i8 5)
+  %fcmp = fcmp uge <2 x double> %a1, %a0
+  %sext = sext <2 x i1> %fcmp to <2 x i64>
+  %res = bitcast <2 x i64> %sext to <2 x double>
   ret <2 x double> %res
 }
 
@@ -832,7 +847,9 @@
 ; X64: # BB#0:
 ; X64-NEXT: cmpnlepd %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 6)
+  %fcmp = fcmp ugt <2 x double> %a0, %a1
+  %sext = sext <2 x i1> %fcmp to <2 x i64>
+  %res = bitcast <2 x i64> %sext to <2 x double>
   ret <2 x double> %res
 }
 
@@ -860,7 +877,9 @@
 ; X64: # BB#0:
 ; X64-NEXT: cmpnltpd %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 5)
+  %fcmp = fcmp uge <2 x double> %a0, %a1
+  %sext = sext <2 x i1> %fcmp to <2 x i64>
+  %res = bitcast <2 x i64> %sext to <2 x double>
   ret <2 x double> %res
 }
 
@@ -888,7 +907,9 @@
 ; X64: # BB#0:
 ; X64-NEXT: cmpordpd %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 7)
+  %fcmp = fcmp ord <2 x double> %a0, %a1
+  %sext = sext <2 x i1> %fcmp to <2 x i64>
+  %res = bitcast <2 x i64> %sext to <2 x double>
   ret <2 x double> %res
 }
 
@@ -916,7 +937,9 @@
 ; X64: # BB#0:
 ; X64-NEXT: cmpunordpd %xmm1, %xmm0
 ; X64-NEXT: retq
-  %res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 3)
+  %fcmp = fcmp uno <2 x double> %a0, %a1
+  %sext = sext <2 x i1> %fcmp to <2 x i64>
+  %res = bitcast <2 x i64> %sext to <2 x double>
   ret <2 x double> %res
 }