Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -35214,6 +35214,11 @@
   if (!OpVT.isScalarInteger() || OpSize < 128 || isNullConstant(Y))
     return SDValue();
 
+  // Bail out if we know that this is not really just an oversized integer.
+  if (peekThroughBitcasts(X).getValueType() == MVT::f128 ||
+      peekThroughBitcasts(Y).getValueType() == MVT::f128)
+    return SDValue();
+
   // TODO: Use PXOR + PTEST for SSE4.1 or later?
   // TODO: Add support for AVX-512.
   EVT VT = SetCC->getValueType(0);
Index: llvm/trunk/test/CodeGen/X86/fp128-cast.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/fp128-cast.ll
+++ llvm/trunk/test/CodeGen/X86/fp128-cast.ll
@@ -1,5 +1,7 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx | FileCheck %s --check-prefix=X64
 ; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+mmx | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=-mmx | FileCheck %s --check-prefix=X64_NO_MMX
 ; RUN: llc < %s -O2 -mtriple=i686-linux-gnu -mattr=+mmx | FileCheck %s --check-prefix=X32
 
 ; Check soft floating point conversion function calls.
@@ -359,6 +361,69 @@
 ; X64: retq
 }
 
+define i1 @PR34866(i128 %x) {
+; X64-LABEL: PR34866:
+; X64:       # BB#0:
+; X64-NEXT:    movaps {{.*}}(%rip), %xmm0
+; X64-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    xorq -{{[0-9]+}}(%rsp), %rsi
+; X64-NEXT:    xorq -{{[0-9]+}}(%rsp), %rdi
+; X64-NEXT:    orq %rsi, %rdi
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+;
+; X64_NO_MMX-LABEL: PR34866:
+; X64_NO_MMX:       # BB#0:
+; X64_NO_MMX-NEXT:    orq %rsi, %rdi
+; X64_NO_MMX-NEXT:    sete %al
+; X64_NO_MMX-NEXT:    retq
+;
+; X32-LABEL: PR34866:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    orl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    orl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    orl %ecx, %eax
+; X32-NEXT:    sete %al
+; X32-NEXT:    retl
+  %bc_mmx = bitcast fp128 0xL00000000000000000000000000000000 to i128
+  %cmp = icmp eq i128 %bc_mmx, %x
+  ret i1 %cmp
+}
+
+define i1 @PR34866_commute(i128 %x) {
+; X64-LABEL: PR34866_commute:
+; X64:       # BB#0:
+; X64-NEXT:    movaps {{.*}}(%rip), %xmm0
+; X64-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    xorq -{{[0-9]+}}(%rsp), %rsi
+; X64-NEXT:    xorq -{{[0-9]+}}(%rsp), %rdi
+; X64-NEXT:    orq %rsi, %rdi
+; X64-NEXT:    sete %al
+; X64-NEXT:    retq
+;
+; X64_NO_MMX-LABEL: PR34866_commute:
+; X64_NO_MMX:       # BB#0:
+; X64_NO_MMX-NEXT:    orq %rsi, %rdi
+; X64_NO_MMX-NEXT:    sete %al
+; X64_NO_MMX-NEXT:    retq
+;
+; X32-LABEL: PR34866_commute:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    orl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    orl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    orl %ecx, %eax
+; X32-NEXT:    sete %al
+; X32-NEXT:    retl
+  %bc_mmx = bitcast fp128 0xL00000000000000000000000000000000 to i128
+  %cmp = icmp eq i128 %x, %bc_mmx
+  ret i1 %cmp
+}
+
+
 declare double @copysign(double, double) #1
 
 attributes #2 = { nounwind readnone }