Index: llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp =================================================================== --- llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp +++ llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp @@ -2412,6 +2412,52 @@ } } + // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the + // TableGen record rather than the AsmName to choose registers for InlineAsm + // constraints, plus we want to match those names to the widest floating point + // register type available, manually select floating point registers here. + if (Subtarget.hasStdExtF() || Subtarget.hasStdExtD()) { + std::pair<unsigned, unsigned> FReg = + StringSwitch<std::pair<unsigned, unsigned>>(Constraint.lower()) + .Case("{f0}", {RISCV::F0_32, RISCV::F0_64}) + .Case("{f1}", {RISCV::F1_32, RISCV::F1_64}) + .Case("{f2}", {RISCV::F2_32, RISCV::F2_64}) + .Case("{f3}", {RISCV::F3_32, RISCV::F3_64}) + .Case("{f4}", {RISCV::F4_32, RISCV::F4_64}) + .Case("{f5}", {RISCV::F5_32, RISCV::F5_64}) + .Case("{f6}", {RISCV::F6_32, RISCV::F6_64}) + .Case("{f7}", {RISCV::F7_32, RISCV::F7_64}) + .Case("{f8}", {RISCV::F8_32, RISCV::F8_64}) + .Case("{f9}", {RISCV::F9_32, RISCV::F9_64}) + .Case("{f10}", {RISCV::F10_32, RISCV::F10_64}) + .Case("{f11}", {RISCV::F11_32, RISCV::F11_64}) + .Case("{f12}", {RISCV::F12_32, RISCV::F12_64}) + .Case("{f13}", {RISCV::F13_32, RISCV::F13_64}) + .Case("{f14}", {RISCV::F14_32, RISCV::F14_64}) + .Case("{f15}", {RISCV::F15_32, RISCV::F15_64}) + .Case("{f16}", {RISCV::F16_32, RISCV::F16_64}) + .Case("{f17}", {RISCV::F17_32, RISCV::F17_64}) + .Case("{f18}", {RISCV::F18_32, RISCV::F18_64}) + .Case("{f19}", {RISCV::F19_32, RISCV::F19_64}) + .Case("{f20}", {RISCV::F20_32, RISCV::F20_64}) + .Case("{f21}", {RISCV::F21_32, RISCV::F21_64}) + .Case("{f22}", {RISCV::F22_32, RISCV::F22_64}) + .Case("{f23}", {RISCV::F23_32, RISCV::F23_64}) + .Case("{f24}", {RISCV::F24_32, RISCV::F24_64}) + .Case("{f25}", {RISCV::F25_32, RISCV::F25_64}) + .Case("{f26}", {RISCV::F26_32, RISCV::F26_64}) + .Case("{f27}", 
{RISCV::F27_32, RISCV::F27_64}) + .Case("{f28}", {RISCV::F28_32, RISCV::F28_64}) + .Case("{f29}", {RISCV::F29_32, RISCV::F29_64}) + .Case("{f30}", {RISCV::F30_32, RISCV::F30_64}) + .Case("{f31}", {RISCV::F31_32, RISCV::F31_64}) + .Default({-1U, -1U}); + if (FReg.first != -1U) + return Subtarget.hasStdExtD() + ? std::make_pair(FReg.second, &RISCV::FPR64RegClass) + : std::make_pair(FReg.first, &RISCV::FPR32RegClass); + } + return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); } Index: llvm/trunk/test/CodeGen/RISCV/inline-asm-clobbers.ll =================================================================== --- llvm/trunk/test/CodeGen/RISCV/inline-asm-clobbers.ll +++ llvm/trunk/test/CodeGen/RISCV/inline-asm-clobbers.ll @@ -0,0 +1,61 @@ +; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV32I %s +; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64I %s +; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV32I %s +; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64I %s +; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV32I %s +; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64I %s +; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi ilp32f -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV32IF %s +; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi lp64f -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64IF %s +; RUN: llc -mtriple=riscv32 -mattr=+d -target-abi ilp32d -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV32ID %s +; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi lp64d -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV64ID %s + + +define void @testcase() nounwind { +; RV32I-LABEL: testcase: +; 
RV32I: sw s1, {{[0-9]+}}(sp) +; RV32I-NEXT: sw s2, {{[0-9]+}}(sp) +; RV32I-NOT: fsw fs0, {{[0-9]+}}(sp) +; RV32I-NOT: fsd fs0, {{[0-9]+}}(sp) +; +; RV64I-LABEL: testcase: +; RV64I: sd s1, {{[0-9]+}}(sp) +; RV64I-NEXT: sd s2, {{[0-9]+}}(sp) +; RV64I-NOT: fsw fs0, {{[0-9]+}}(sp) +; RV64I-NOT: fsd fs0, {{[0-9]+}}(sp) +; +; RV32IF-LABEL: testcase: +; RV32IF: sw s1, {{[0-9]+}}(sp) +; RV32IF-NEXT: sw s2, {{[0-9]+}}(sp) +; RV32IF-NEXT: fsw fs0, {{[0-9]+}}(sp) +; RV32IF-NEXT: fsw fs1, {{[0-9]+}}(sp) +; +; RV64IF-LABEL: testcase: +; RV64IF: sd s1, {{[0-9]+}}(sp) +; RV64IF-NEXT: sd s2, {{[0-9]+}}(sp) +; RV64IF-NEXT: fsw fs0, {{[0-9]+}}(sp) +; RV64IF-NEXT: fsw fs1, {{[0-9]+}}(sp) +; +; RV32ID-LABEL: testcase: +; RV32ID: sw s1, {{[0-9]+}}(sp) +; RV32ID-NEXT: sw s2, {{[0-9]+}}(sp) +; RV32ID-NEXT: fsd fs0, {{[0-9]+}}(sp) +; RV32ID-NEXT: fsd fs1, {{[0-9]+}}(sp) +; +; RV64ID-LABEL: testcase: +; RV64ID: sd s1, {{[0-9]+}}(sp) +; RV64ID-NEXT: sd s2, {{[0-9]+}}(sp) +; RV64ID-NEXT: fsd fs0, {{[0-9]+}}(sp) +; RV64ID-NEXT: fsd fs1, {{[0-9]+}}(sp) + tail call void asm sideeffect "", "~{f8},~{f9},~{x9},~{x18}"() + ret void +}