Index: llvm/trunk/lib/Target/X86/X86FastISel.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86FastISel.cpp
+++ llvm/trunk/lib/Target/X86/X86FastISel.cpp
@@ -123,6 +123,9 @@
   bool X86SelectTrunc(const Instruction *I);
 
+  bool X86SelectFPExtOrFPTrunc(const Instruction *I, unsigned Opc,
+                               const TargetRegisterClass *RC);
+
   bool X86SelectFPExt(const Instruction *I);
 
   bool X86SelectFPTrunc(const Instruction *I);
@@ -2001,41 +2004,46 @@
   return false;
 }
 
+// Helper method used by X86SelectFPExt and X86SelectFPTrunc.
+bool X86FastISel::X86SelectFPExtOrFPTrunc(const Instruction *I,
+                                          unsigned TargetOpc,
+                                          const TargetRegisterClass *RC) {
+  assert((I->getOpcode() == Instruction::FPExt ||
+          I->getOpcode() == Instruction::FPTrunc) &&
+         "Instruction must be an FPExt or FPTrunc!");
+
+  unsigned OpReg = getRegForValue(I->getOperand(0));
+  if (OpReg == 0)
+    return false;
+
+  unsigned ResultReg = createResultReg(RC);
+  MachineInstrBuilder MIB;
+  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpc),
+                ResultReg);
+  if (Subtarget->hasAVX())
+    MIB.addReg(OpReg);
+  MIB.addReg(OpReg);
+  updateValueMap(I, ResultReg);
+  return true;
+}
+
 bool X86FastISel::X86SelectFPExt(const Instruction *I) {
-  // fpext from float to double.
-  if (X86ScalarSSEf64 &&
-      I->getType()->isDoubleTy()) {
-    const Value *V = I->getOperand(0);
-    if (V->getType()->isFloatTy()) {
-      unsigned OpReg = getRegForValue(V);
-      if (OpReg == 0) return false;
-      unsigned ResultReg = createResultReg(&X86::FR64RegClass);
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
-              TII.get(X86::CVTSS2SDrr), ResultReg)
-        .addReg(OpReg);
-      updateValueMap(I, ResultReg);
-      return true;
-    }
+  if (X86ScalarSSEf64 && I->getType()->isDoubleTy() &&
+      I->getOperand(0)->getType()->isFloatTy()) {
+    // fpext from float to double.
+    unsigned Opc = Subtarget->hasAVX() ? X86::VCVTSS2SDrr : X86::CVTSS2SDrr;
+    return X86SelectFPExtOrFPTrunc(I, Opc, &X86::FR64RegClass);
   }
 
   return false;
 }
 
 bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
-  if (X86ScalarSSEf64) {
-    if (I->getType()->isFloatTy()) {
-      const Value *V = I->getOperand(0);
-      if (V->getType()->isDoubleTy()) {
-        unsigned OpReg = getRegForValue(V);
-        if (OpReg == 0) return false;
-        unsigned ResultReg = createResultReg(&X86::FR32RegClass);
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
-                TII.get(X86::CVTSD2SSrr), ResultReg)
-          .addReg(OpReg);
-        updateValueMap(I, ResultReg);
-        return true;
-      }
-    }
+  if (X86ScalarSSEf64 && I->getType()->isFloatTy() &&
+      I->getOperand(0)->getType()->isDoubleTy()) {
+    // fptrunc from double to float.
+    unsigned Opc = Subtarget->hasAVX() ? X86::VCVTSD2SSrr : X86::CVTSD2SSrr;
+    return X86SelectFPExtOrFPTrunc(I, Opc, &X86::FR32RegClass);
   }
 
   return false;
Index: llvm/trunk/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll
+++ llvm/trunk/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll
@@ -0,0 +1,65 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -fast-isel | FileCheck %s --check-prefix=ALL --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -fast-isel | FileCheck %s --check-prefix=ALL --check-prefix=AVX
+;
+; Verify that fast-isel doesn't select legacy SSE instructions on targets that
+; feature AVX.
+;
+; Test cases are obtained from the following code snippet:
+; ///
+; double single_to_double_rr(float x) {
+;   return (double)x;
+; }
+; float double_to_single_rr(double x) {
+;   return (float)x;
+; }
+; double single_to_double_rm(float *x) {
+;   return (double)*x;
+; }
+; float double_to_single_rm(double *x) {
+;   return (float)*x;
+; }
+; ///
+
+define double @single_to_double_rr(float %x) {
+; ALL-LABEL: single_to_double_rr:
+; SSE-NOT: vcvtss2sd
+; AVX: vcvtss2sd %xmm0, %xmm0, %xmm0
+; ALL: ret
+entry:
+  %conv = fpext float %x to double
+  ret double %conv
+}
+
+define float @double_to_single_rr(double %x) {
+; ALL-LABEL: double_to_single_rr:
+; SSE-NOT: vcvtsd2ss
+; AVX: vcvtsd2ss %xmm0, %xmm0, %xmm0
+; ALL: ret
+entry:
+  %conv = fptrunc double %x to float
+  ret float %conv
+}
+
+define double @single_to_double_rm(float* %x) {
+; ALL-LABEL: single_to_double_rm:
+; SSE: cvtss2sd (%rdi), %xmm0
+; AVX: vmovss (%rdi), %xmm0
+; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
+; ALL-NEXT: ret
+entry:
+  %0 = load float* %x, align 4
+  %conv = fpext float %0 to double
+  ret double %conv
+}
+
+define float @double_to_single_rm(double* %x) {
+; ALL-LABEL: double_to_single_rm:
+; SSE: cvtsd2ss (%rdi), %xmm0
+; AVX: vmovsd (%rdi), %xmm0
+; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
+; ALL-NEXT: ret
+entry:
+  %0 = load double* %x, align 8
+  %conv = fptrunc double %0 to float
+  ret float %conv
+}