Index: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
@@ -2701,6 +2701,30 @@
     MI->getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4));
     return TargetInstrInfo::commuteInstruction(MI, NewMI);
   }
+  case X86::CMPPDrri:
+  case X86::CMPPSrri:
+  case X86::VCMPPDrri:
+  case X86::VCMPPSrri:
+  case X86::VCMPPDYrri:
+  case X86::VCMPPSYrri: {
+    // Float comparison can be safely commuted for
+    // Ordered/Unordered/Equal/NotEqual tests
+    unsigned Imm = MI->getOperand(3).getImm() & 0x7;
+    switch (Imm) {
+    case 0x00: // EQUAL
+    case 0x03: // UNORDERED
+    case 0x04: // NOT EQUAL
+    case 0x07: // ORDERED
+      if (NewMI) {
+        MachineFunction &MF = *MI->getParent()->getParent();
+        MI = MF.CloneMachineInstr(MI);
+        NewMI = false;
+      }
+      return TargetInstrInfo::commuteInstruction(MI, NewMI);
+    default:
+      return nullptr;
+    }
+  }
   case X86::CMOVB16rr:  case X86::CMOVB32rr:  case X86::CMOVB64rr:
   case X86::CMOVAE16rr: case X86::CMOVAE32rr: case X86::CMOVAE64rr:
   case X86::CMOVE16rr:  case X86::CMOVE32rr:  case X86::CMOVE64rr:
@@ -2799,6 +2823,26 @@
     SrcOpIdx1 = 1;
     SrcOpIdx2 = 2;
     return true;
+  case X86::CMPPDrri:
+  case X86::CMPPSrri:
+  case X86::VCMPPDrri:
+  case X86::VCMPPSrri:
+  case X86::VCMPPDYrri:
+  case X86::VCMPPSYrri: {
+    // Float comparison can be safely commuted for
+    // Ordered/Unordered/Equal/NotEqual tests
+    unsigned Imm = MI->getOperand(3).getImm() & 0x7;
+    switch (Imm) {
+    case 0x00: // EQUAL
+    case 0x03: // UNORDERED
+    case 0x04: // NOT EQUAL
+    case 0x07: // ORDERED
+      SrcOpIdx1 = 1;
+      SrcOpIdx2 = 2;
+      return true;
+    }
+    return false;
+  }
   case X86::VFMADDPDr231r:
   case X86::VFMADDPSr231r:
   case X86::VFMADDSDr231r:
Index: llvm/trunk/lib/Target/X86/X86InstrSSE.td
===================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td
@@ -2488,6 +2488,7 @@
                             Operand CC, Intrinsic Int, string asm,
                             string asm_alt, Domain d, ImmLeaf immLeaf,
                             OpndItins itins = SSE_ALU_F32P> {
+  let isCommutable = 1 in
  def rri : PIi8<0xC2, MRMSrcReg,
             (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
             [(set RC:$dst, (Int RC:$src1, RC:$src2, immLeaf:$cc))],
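
The four immediates whitelisted above are exactly the symmetric comparison
predicates: a packed compare can only have its operands swapped when
P(a, b) == P(b, a) holds for all inputs, including NaNs. That is true for
EQ/UNORD/NEQ/ORD but not for LT/LE/NLT/NLE, whose swapped forms are the
greater-than predicates that the legacy 3-bit immediate cannot encode.
Masking with 0x7 also appears safe for the 5-bit AVX predicates, since every
encoding whose low three bits are 0, 3, 4 or 7 lands in the EQ, UNORD/FALSE,
NEQ or ORD/TRUE families, all of them symmetric. A minimal standalone sketch
(not part of the patch; evalPredicate is a hand-written model of the Intel
semantics for the low three immediate bits) that checks the symmetry claim
over a few representative values, NaN included:

  // cmp_symmetry.cpp - check which CMPPS/CMPPD predicates are symmetric.
  #include <cassert>
  #include <cmath>

  // Models the low 3 bits of the CMPPS/CMPPD immediate, one scalar lane.
  static bool evalPredicate(unsigned Imm, double A, double B) {
    bool Unordered = std::isnan(A) || std::isnan(B);
    switch (Imm & 0x7) {
    case 0x0: return !Unordered && A == B; // EQ    (symmetric)
    case 0x1: return !Unordered && A < B;  // LT    (not symmetric)
    case 0x2: return !Unordered && A <= B; // LE    (not symmetric)
    case 0x3: return Unordered;            // UNORD (symmetric)
    case 0x4: return Unordered || A != B;  // NEQ   (symmetric)
    case 0x5: return Unordered || A >= B;  // NLT   (not symmetric)
    case 0x6: return Unordered || A > B;   // NLE   (not symmetric)
    default:  return !Unordered;           // ORD   (symmetric)
    }
  }

  int main() {
    const double Vals[] = {-1.0, 0.0, 2.0, NAN};
    // The four predicates the patch commutes are order-insensitive.
    for (unsigned Imm : {0x0u, 0x3u, 0x4u, 0x7u})
      for (double A : Vals)
        for (double B : Vals)
          assert(evalPredicate(Imm, A, B) == evalPredicate(Imm, B, A));
    // LT is not: 1.0 < 2.0 holds but 2.0 < 1.0 does not.
    assert(evalPredicate(0x1, 1.0, 2.0) != evalPredicate(0x1, 2.0, 1.0));
    return 0;
  }
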
Index: llvm/trunk/test/CodeGen/X86/commute-fcmp.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/commute-fcmp.ll
+++ llvm/trunk/test/CodeGen/X86/commute-fcmp.ll
@@ -0,0 +1,340 @@
+; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=x86-64 -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE
+; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefix=AVX
+
+;
+; Float Comparisons
+; Only equal/not-equal/ordered/unordered can be safely commuted
+;
+
+define <4 x i32> @commute_cmpps_eq(<4 x float>* %a0, <4 x float> %a1) #0 {
+  ;SSE-LABEL: commute_cmpps_eq
+  ;SSE: cmpeqps (%rdi), %xmm0
+  ;SSE-NEXT: retq
+
+  ;AVX-LABEL: commute_cmpps_eq
+  ;AVX: vcmpeqps (%rdi), %xmm0, %xmm0
+  ;AVX-NEXT: retq
+
+  %1 = load <4 x float>* %a0
+  %2 = fcmp oeq <4 x float> %1, %a1
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define <4 x i32> @commute_cmpps_ne(<4 x float>* %a0, <4 x float> %a1) #0 {
+  ;SSE-LABEL: commute_cmpps_ne
+  ;SSE: cmpneqps (%rdi), %xmm0
+  ;SSE-NEXT: retq
+
+  ;AVX-LABEL: commute_cmpps_ne
+  ;AVX: vcmpneqps (%rdi), %xmm0, %xmm0
+  ;AVX-NEXT: retq
+
+  %1 = load <4 x float>* %a0
+  %2 = fcmp une <4 x float> %1, %a1
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define <4 x i32> @commute_cmpps_ord(<4 x float>* %a0, <4 x float> %a1) #0 {
+  ;SSE-LABEL: commute_cmpps_ord
+  ;SSE: cmpordps (%rdi), %xmm0
+  ;SSE-NEXT: retq
+
+  ;AVX-LABEL: commute_cmpps_ord
+  ;AVX: vcmpordps (%rdi), %xmm0, %xmm0
+  ;AVX-NEXT: retq
+
+  %1 = load <4 x float>* %a0
+  %2 = fcmp ord <4 x float> %1, %a1
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define <4 x i32> @commute_cmpps_uno(<4 x float>* %a0, <4 x float> %a1) #0 {
+  ;SSE-LABEL: commute_cmpps_uno
+  ;SSE: cmpunordps (%rdi), %xmm0
+  ;SSE-NEXT: retq
+
+  ;AVX-LABEL: commute_cmpps_uno
+  ;AVX: vcmpunordps (%rdi), %xmm0, %xmm0
+  ;AVX-NEXT: retq
+
+  %1 = load <4 x float>* %a0
+  %2 = fcmp uno <4 x float> %1, %a1
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define <4 x i32> @commute_cmpps_lt(<4 x float>* %a0, <4 x float> %a1) #0 {
+  ;SSE-LABEL: commute_cmpps_lt
+  ;SSE: movaps (%rdi), %xmm1
+  ;SSE-NEXT: cmpltps %xmm0, %xmm1
+  ;SSE-NEXT: movaps %xmm1, %xmm0
+  ;SSE-NEXT: retq
+
+  ;AVX-LABEL: commute_cmpps_lt
+  ;AVX: vmovaps (%rdi), %xmm1
+  ;AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
+  ;AVX-NEXT: retq
+
+  %1 = load <4 x float>* %a0
+  %2 = fcmp olt <4 x float> %1, %a1
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define <4 x i32> @commute_cmpps_le(<4 x float>* %a0, <4 x float> %a1) #0 {
+  ;SSE-LABEL: commute_cmpps_le
+  ;SSE: movaps (%rdi), %xmm1
+  ;SSE-NEXT: cmpleps %xmm0, %xmm1
+  ;SSE-NEXT: movaps %xmm1, %xmm0
+  ;SSE-NEXT: retq
+
+  ;AVX-LABEL: commute_cmpps_le
+  ;AVX: vmovaps (%rdi), %xmm1
+  ;AVX-NEXT: vcmpleps %xmm0, %xmm1, %xmm0
+  ;AVX-NEXT: retq
+
+  %1 = load <4 x float>* %a0
+  %2 = fcmp ole <4 x float> %1, %a1
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define <8 x i32> @commute_cmpps_eq_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
+  ;AVX-LABEL: commute_cmpps_eq_ymm
+  ;AVX: vcmpeqps (%rdi), %ymm0, %ymm0
+  ;AVX-NEXT: retq
+
+  %1 = load <8 x float>* %a0
+  %2 = fcmp oeq <8 x float> %1, %a1
+  %3 = sext <8 x i1> %2 to <8 x i32>
+  ret <8 x i32> %3
+}
+
+define <8 x i32> @commute_cmpps_ne_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
+  ;AVX-LABEL: commute_cmpps_ne_ymm
+  ;AVX: vcmpneqps (%rdi), %ymm0, %ymm0
+  ;AVX-NEXT: retq
+
+  %1 = load <8 x float>* %a0
+  %2 = fcmp une <8 x float> %1, %a1
+  %3 = sext <8 x i1> %2 to <8 x i32>
+  ret <8 x i32> %3
+}
+
+define <8 x i32> @commute_cmpps_ord_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
+  ;AVX-LABEL: commute_cmpps_ord_ymm
+  ;AVX: vcmpordps (%rdi), %ymm0, %ymm0
+  ;AVX-NEXT: retq
+
+  %1 = load <8 x float>* %a0
+  %2 = fcmp ord <8 x float> %1, %a1
+  %3 = sext <8 x i1> %2 to <8 x i32>
+  ret <8 x i32> %3
+}
+
+define <8 x i32> @commute_cmpps_uno_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
+  ;AVX-LABEL: commute_cmpps_uno_ymm
+  ;AVX: vcmpunordps (%rdi), %ymm0, %ymm0
+  ;AVX-NEXT: retq
+
+  %1 = load <8 x float>* %a0
+  %2 = fcmp uno <8 x float> %1, %a1
+  %3 = sext <8 x i1> %2 to <8 x i32>
+  ret <8 x i32> %3
+}
+
+define <8 x i32> @commute_cmpps_lt_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
+  ;AVX-LABEL: commute_cmpps_lt_ymm
+  ;AVX: vmovaps (%rdi), %ymm1
+  ;AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
+  ;AVX-NEXT: retq
+
+  %1 = load <8 x float>* %a0
+  %2 = fcmp olt <8 x float> %1, %a1
+  %3 = sext <8 x i1> %2 to <8 x i32>
+  ret <8 x i32> %3
+}
+
+define <8 x i32> @commute_cmpps_le_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
+  ;AVX-LABEL: commute_cmpps_le_ymm
+  ;AVX: vmovaps (%rdi), %ymm1
+  ;AVX-NEXT: vcmpleps %ymm0, %ymm1, %ymm0
+  ;AVX-NEXT: retq
+
+  %1 = load <8 x float>* %a0
+  %2 = fcmp ole <8 x float> %1, %a1
+  %3 = sext <8 x i1> %2 to <8 x i32>
+  ret <8 x i32> %3
+}
+
+;
+; Double Comparisons
+; Only equal/not-equal/ordered/unordered can be safely commuted
+;
+
+define <2 x i64> @commute_cmppd_eq(<2 x double>* %a0, <2 x double> %a1) #0 {
+  ;SSE-LABEL: commute_cmppd_eq
+  ;SSE: cmpeqpd (%rdi), %xmm0
+  ;SSE-NEXT: retq
+
+  ;AVX-LABEL: commute_cmppd_eq
+  ;AVX: vcmpeqpd (%rdi), %xmm0, %xmm0
+  ;AVX-NEXT: retq
+
+  %1 = load <2 x double>* %a0
+  %2 = fcmp oeq <2 x double> %1, %a1
+  %3 = sext <2 x i1> %2 to <2 x i64>
+  ret <2 x i64> %3
+}
+
+define <2 x i64> @commute_cmppd_ne(<2 x double>* %a0, <2 x double> %a1) #0 {
+  ;SSE-LABEL: commute_cmppd_ne
+  ;SSE: cmpneqpd (%rdi), %xmm0
+  ;SSE-NEXT: retq
+
+  ;AVX-LABEL: commute_cmppd_ne
+  ;AVX: vcmpneqpd (%rdi), %xmm0, %xmm0
+  ;AVX-NEXT: retq
+
+  %1 = load <2 x double>* %a0
+  %2 = fcmp une <2 x double> %1, %a1
+  %3 = sext <2 x i1> %2 to <2 x i64>
+  ret <2 x i64> %3
+}
+
+define <2 x i64> @commute_cmppd_ord(<2 x double>* %a0, <2 x double> %a1) #0 {
+  ;SSE-LABEL: commute_cmppd_ord
+  ;SSE: cmpordpd (%rdi), %xmm0
+  ;SSE-NEXT: retq
+
+  ;AVX-LABEL: commute_cmppd_ord
+  ;AVX: vcmpordpd (%rdi), %xmm0, %xmm0
+  ;AVX-NEXT: retq
+
+  %1 = load <2 x double>* %a0
+  %2 = fcmp ord <2 x double> %1, %a1
+  %3 = sext <2 x i1> %2 to <2 x i64>
+  ret <2 x i64> %3
+}
+
+define <2 x i64> @commute_cmppd_uno(<2 x double>* %a0, <2 x double> %a1) #0 {
+  ;SSE-LABEL: commute_cmppd_uno
+  ;SSE: cmpunordpd (%rdi), %xmm0
+  ;SSE-NEXT: retq
+
+  ;AVX-LABEL: commute_cmppd_uno
+  ;AVX: vcmpunordpd (%rdi), %xmm0, %xmm0
+  ;AVX-NEXT: retq
+
+  %1 = load <2 x double>* %a0
+  %2 = fcmp uno <2 x double> %1, %a1
+  %3 = sext <2 x i1> %2 to <2 x i64>
+  ret <2 x i64> %3
+}
+
+define <2 x i64> @commute_cmppd_lt(<2 x double>* %a0, <2 x double> %a1) #0 {
+  ;SSE-LABEL: commute_cmppd_lt
+  ;SSE: movapd (%rdi), %xmm1
+  ;SSE-NEXT: cmpltpd %xmm0, %xmm1
+  ;SSE-NEXT: movapd %xmm1, %xmm0
+  ;SSE-NEXT: retq
+
+  ;AVX-LABEL: commute_cmppd_lt
+  ;AVX: vmovapd (%rdi), %xmm1
+  ;AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
+  ;AVX-NEXT: retq
+
+  %1 = load <2 x double>* %a0
+  %2 = fcmp olt <2 x double> %1, %a1
+  %3 = sext <2 x i1> %2 to <2 x i64>
+  ret <2 x i64> %3
+}
+
+define <2 x i64> @commute_cmppd_le(<2 x double>* %a0, <2 x double> %a1) #0 {
+  ;SSE-LABEL: commute_cmppd_le
+  ;SSE: movapd (%rdi), %xmm1
+  ;SSE-NEXT: cmplepd %xmm0, %xmm1
+  ;SSE-NEXT: movapd %xmm1, %xmm0
+  ;SSE-NEXT: retq
+
+  ;AVX-LABEL: commute_cmppd_le
+  ;AVX: vmovapd (%rdi), %xmm1
+  ;AVX-NEXT: vcmplepd %xmm0, %xmm1, %xmm0
+  ;AVX-NEXT: retq
+
+  %1 = load <2 x double>* %a0
+  %2 = fcmp ole <2 x double> %1, %a1
+  %3 = sext <2 x i1> %2 to <2 x i64>
+  ret <2 x i64> %3
+}
+
+define <4 x i64> @commute_cmppd_eq_ymm(<4 x double>* %a0, <4 x double> %a1) #0 {
+  ;AVX-LABEL: commute_cmppd_eq_ymm
+  ;AVX: vcmpeqpd (%rdi), %ymm0, %ymm0
+  ;AVX-NEXT: retq
+
+  %1 = load <4 x double>* %a0
+  %2 = fcmp oeq <4 x double> %1, %a1
+  %3 = sext <4 x i1> %2 to <4 x i64>
+  ret <4 x i64> %3
+}
+
+define <4 x i64> @commute_cmppd_ne_ymm(<4 x double>* %a0, <4 x double> %a1) #0 {
+  ;AVX-LABEL: commute_cmppd_ne_ymm
+  ;AVX: vcmpneqpd (%rdi), %ymm0, %ymm0
+  ;AVX-NEXT: retq
+
+  %1 = load <4 x double>* %a0
+  %2 = fcmp une <4 x double> %1, %a1
+  %3 = sext <4 x i1> %2 to <4 x i64>
+  ret <4 x i64> %3
+}
+
+define <4 x i64> @commute_cmppd_ord_ymm(<4 x double>* %a0, <4 x double> %a1) #0 {
+  ;AVX-LABEL: commute_cmppd_ord_ymm
+  ;AVX: vcmpordpd (%rdi), %ymm0, %ymm0
+  ;AVX-NEXT: retq
+
+  %1 = load <4 x double>* %a0
+  %2 = fcmp ord <4 x double> %1, %a1
+  %3 = sext <4 x i1> %2 to <4 x i64>
+  ret <4 x i64> %3
+}
+
+define <4 x i64> @commute_cmppd_uno_ymm(<4 x double>* %a0, <4 x double> %a1) #0 {
+  ;AVX-LABEL: commute_cmppd_uno_ymm
+  ;AVX: vcmpunordpd (%rdi), %ymm0, %ymm0
+  ;AVX-NEXT: retq
+
+  %1 = load <4 x double>* %a0
+  %2 = fcmp uno <4 x double> %1, %a1
+  %3 = sext <4 x i1> %2 to <4 x i64>
+  ret <4 x i64> %3
+}
+
+define <4 x i64> @commute_cmppd_lt_ymm(<4 x double>* %a0, <4 x double> %a1) #0 {
+  ;AVX-LABEL: commute_cmppd_lt_ymm
+  ;AVX: vmovapd (%rdi), %ymm1
+  ;AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
+  ;AVX-NEXT: retq
+
+  %1 = load <4 x double>* %a0
+  %2 = fcmp olt <4 x double> %1, %a1
+  %3 = sext <4 x i1> %2 to <4 x i64>
+  ret <4 x i64> %3
+}
+
+define <4 x i64> @commute_cmppd_le_ymm(<4 x double>* %a0, <4 x double> %a1) #0 {
+  ;AVX-LABEL: commute_cmppd_le_ymm
+  ;AVX: vmovapd (%rdi), %ymm1
+  ;AVX-NEXT: vcmplepd %ymm0, %ymm1, %ymm0
+  ;AVX-NEXT: retq
+
+  %1 = load <4 x double>* %a0
+  %2 = fcmp ole <4 x double> %1, %a1
+  %3 = sext <4 x i1> %2 to <4 x i64>
+  ret <4 x i64> %3
+}
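
The lt/le tests above pin down the current fallback: swapping the operands of
an asymmetric predicate would need a greater-than immediate, which the legacy
3-bit CMPPS/CMPPD encoding does not have, so the load stays unfolded and an
extra register move is emitted instead. On AVX targets the extended 5-bit
predicates do provide GT/GE/NGT/NGE, so a follow-up could commute those cases
by rewriting the immediate as well. A hypothetical sketch (not part of the
patch; swappedPredicate and its use of the AVX immediates 0x0E/0x0D/0x0A/0x09
are assumptions about such a follow-up, and it only handles the legacy 3-bit
immediates):

  #include <optional>

  // Returns the predicate P' with P(a, b) == P'(b, a), or std::nullopt when
  // the swap is not expressible. Asymmetric predicates map onto the AVX-only
  // greater-than encodings: GT_OS=0x0E, GE_OS=0x0D, NGT_US=0x0A, NGE_US=0x09.
  // Assumes Imm is a legacy 3-bit predicate (0..7).
  static std::optional<unsigned> swappedPredicate(unsigned Imm, bool HasAVX) {
    switch (Imm & 0x7) {
    case 0x0: // EQ
    case 0x3: // UNORD
    case 0x4: // NEQ
    case 0x7: // ORD
      return Imm; // symmetric: commutes with the immediate unchanged
    case 0x1: // LT(a, b) == GT(b, a)
      if (HasAVX) return 0x0Eu;
      return std::nullopt;
    case 0x2: // LE(a, b) == GE(b, a)
      if (HasAVX) return 0x0Du;
      return std::nullopt;
    case 0x5: // NLT(a, b) == NGT(b, a)
      if (HasAVX) return 0x0Au;
      return std::nullopt;
    default:  // NLE(a, b) == NGE(b, a)
      if (HasAVX) return 0x09u;
      return std::nullopt;
    }
  }

Note that the signaling behaviour lines up in each pair (LT_OS maps to the
signaling GT_OS, NLT_US to the non-signaling-on-QNaN NGT_US, and so on), so
rewriting the immediate would not change exception semantics.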