diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -843,8 +843,9 @@
   case ISD::ConstantFP: {
     const APFloat &APF = cast<ConstantFPSDNode>(Node)->getValueAPF();
     if (Subtarget->hasStdExtZfa()) {
-      // fli.h requires Zfh, but we might only have Zfhmin.
-      if (VT == MVT::f16 && Subtarget->hasStdExtZfh() &&
+      // fli.h requires Zfh or Zvfh, but we might only have Zfhmin.
+      if (VT == MVT::f16 &&
+          (Subtarget->hasStdExtZfh() || Subtarget->hasStdExtZvfh()) &&
           RISCVLoadFPImm::getLoadFP16Imm(APF) != -1)
         break;
       if (VT == MVT::f32 && RISCVLoadFPImm::getLoadFP32Imm(APF) != -1)
@@ -2972,8 +2973,23 @@
   // td can handle +0.0 already.
   if (APF.isPosZero())
     return false;
+
+  MVT VT = CFP->getSimpleValueType(0);
+
+  if (Subtarget->hasStdExtZfa()) {
+    // fli.h requires Zfh or Zvfh, but we might only have Zfhmin.
+    if (VT == MVT::f16 &&
+        (Subtarget->hasStdExtZfh() || Subtarget->hasStdExtZvfh()) &&
+        RISCVLoadFPImm::getLoadFP16Imm(APF) != -1)
+      return false;
+    if (VT == MVT::f32 && RISCVLoadFPImm::getLoadFP32Imm(APF) != -1)
+      return false;
+    if (VT == MVT::f64 && RISCVLoadFPImm::getLoadFP64Imm(APF) != -1)
+      return false;
+  }
+
   MVT XLenVT = Subtarget->getXLenVT();
-  if (CFP->getValueType(0) == MVT::f64 && !Subtarget->is64Bit()) {
+  if (VT == MVT::f64 && !Subtarget->is64Bit()) {
     assert(APF.isNegZero() && "Unexpected constant.");
     return false;
   }
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1548,8 +1548,9 @@
     return false;

   if (Subtarget.hasStdExtZfa()) {
-    // fli.h requires Zfh, but we might only have Zfhmin.
-    if (VT == MVT::f16 && Subtarget.hasStdExtZfh() &&
+    // fli.h requires Zfh or Zvfh, but we might only have Zfhmin.
+    if (VT == MVT::f16 &&
+        (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZvfh()) &&
         RISCVLoadFPImm::getLoadFP16Imm(Imm) != -1)
       return true;
     if (VT == MVT::f32 && RISCVLoadFPImm::getLoadFP32Imm(Imm) != -1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zfh,+experimental-zfa,+experimental-zvfh,+v -target-abi ilp32d -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv64 -mattr=+zfh,+experimental-zfa,+experimental-zvfh,+v -target-abi lp64d -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+
+define <vscale x 8 x half> @vsplat_f16_0p625() {
+; CHECK-LABEL: vsplat_f16_0p625:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fli.h ft0, 6.250000e-01
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfmv.v.f v8, ft0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> poison, half 0.625, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x half> %splat
+}
+
+define <vscale x 8 x float> @vsplat_f32_0p75() {
+; CHECK-LABEL: vsplat_f32_0p75:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fli.s ft0, 7.500000e-01
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfmv.v.f v8, ft0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> poison, float 0.75, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x float> %splat
+}
+
+define <vscale x 8 x double> @vsplat_f64_neg1() {
+; CHECK-LABEL: vsplat_f64_neg1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fli.d ft0, -1.000000e+00
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfmv.v.f v8, ft0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> poison, double -1.0, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x double> %splat
+}