diff --git a/llvm/test/CodeGen/RISCV/double-convert-indirect.ll b/llvm/test/CodeGen/RISCV/double-convert-indirect.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/double-convert-indirect.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+f,+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+f,+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
+
+;; These testcases check that we merge sequences of `fcvt.d.w[u]; fcvt.s.d`
+;; into `fcvt.s.w[u]` (the CHECKs below still show the unmerged output).
+;;
+;; TODO: Unfortunately, though this only uses 32-bit FP instructions, we cannot
+;; do this optimisation without the D extension as we need 64-bit FP values to
+;; be legal to get the right operands to match.
+
+define float @fcvt_s_w_via_d(i32 %0) nounwind {
+; RV32IFD-LABEL: fcvt_s_w_via_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.d.w ft0, a0
+; RV32IFD-NEXT:    fcvt.s.d ft0, ft0
+; RV32IFD-NEXT:    fmv.x.w a0, ft0
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_s_w_via_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.d.w ft0, a0
+; RV64IFD-NEXT:    fcvt.s.d ft0, ft0
+; RV64IFD-NEXT:    fmv.x.w a0, ft0
+; RV64IFD-NEXT:    ret
+  %2 = sitofp i32 %0 to double
+  %3 = fptrunc double %2 to float
+  ret float %3
+}
+
+define float @fcvt_s_wu_via_d(i32 %0) nounwind {
+; RV32IFD-LABEL: fcvt_s_wu_via_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    fcvt.d.wu ft0, a0
+; RV32IFD-NEXT:    fcvt.s.d ft0, ft0
+; RV32IFD-NEXT:    fmv.x.w a0, ft0
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_s_wu_via_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.d.wu ft0, a0
+; RV64IFD-NEXT:    fcvt.s.d ft0, ft0
+; RV64IFD-NEXT:    fmv.x.w a0, ft0
+; RV64IFD-NEXT:    ret
+  %2 = uitofp i32 %0 to double
+  %3 = fptrunc double %2 to float
+  ret float %3
+}