diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -842,16 +842,8 @@
   }
   case ISD::ConstantFP: {
     const APFloat &APF = cast<ConstantFPSDNode>(Node)->getValueAPF();
-    if (Subtarget->hasStdExtZfa()) {
-      // fli.h requires Zfh, but we might only have Zfhmin.
-      if (VT == MVT::f16 && Subtarget->hasStdExtZfh() &&
-          RISCVLoadFPImm::getLoadFP16Imm(APF) != -1)
-        break;
-      if (VT == MVT::f32 && RISCVLoadFPImm::getLoadFP32Imm(APF) != -1)
-        break;
-      if (VT == MVT::f64 && RISCVLoadFPImm::getLoadFP64Imm(APF) != -1)
-        break;
-    }
+    if (static_cast<const RISCVTargetLowering *>(TLI)->isLegalZfaFPImm(APF, VT))
+      break;
 
     bool NegZeroF64 = APF.isNegZero() && VT == MVT::f64;
     SDValue Imm;
@@ -2972,8 +2964,14 @@
   // td can handle +0.0 already.
   if (APF.isPosZero())
     return false;
+
+  MVT VT = CFP->getSimpleValueType(0);
+
+  if (static_cast<const RISCVTargetLowering *>(TLI)->isLegalZfaFPImm(APF, VT))
+    return false;
+
   MVT XLenVT = Subtarget->getXLenVT();
-  if (CFP->getValueType(0) == MVT::f64 && !Subtarget->is64Bit()) {
+  if (VT == MVT::f64 && !Subtarget->is64Bit()) {
     assert(APF.isNegZero() && "Unexpected constant.");
     return false;
   }
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -389,6 +389,7 @@
                           SmallVectorImpl<Use *> &Ops) const override;
   bool shouldScalarizeBinop(SDValue VecOp) const override;
   bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
+  bool isLegalZfaFPImm(const APFloat &Imm, EVT VT) const;
   bool isFPImmLegal(const APFloat &Imm, EVT VT,
                     bool ForCodeSize) const override;
   bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1538,6 +1538,22 @@
   return false;
 }
 
+bool RISCVTargetLowering::isLegalZfaFPImm(const APFloat &Imm, EVT VT) const {
+  if (Subtarget.hasStdExtZfa()) {
+    // fli.h requires Zfh or Zvfh, but we might only have Zfhmin.
+    if (VT == MVT::f16 &&
+        (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZvfh()) &&
+        RISCVLoadFPImm::getLoadFP16Imm(Imm) != -1)
+      return true;
+    if (VT == MVT::f32 && RISCVLoadFPImm::getLoadFP32Imm(Imm) != -1)
+      return true;
+    if (VT == MVT::f64 && RISCVLoadFPImm::getLoadFP64Imm(Imm) != -1)
+      return true;
+  }
+
+  return false;
+}
+
 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                        bool ForCodeSize) const {
   if (VT == MVT::f16 && !Subtarget.hasStdExtZfhOrZfhmin())
@@ -1547,16 +1563,8 @@
   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
     return false;
 
-  if (Subtarget.hasStdExtZfa()) {
-    // fli.h requires Zfh, but we might only have Zfhmin.
-    if (VT == MVT::f16 && Subtarget.hasStdExtZfh() &&
-        RISCVLoadFPImm::getLoadFP16Imm(Imm) != -1)
-      return true;
-    if (VT == MVT::f32 && RISCVLoadFPImm::getLoadFP32Imm(Imm) != -1)
-      return true;
-    if (VT == MVT::f64 && RISCVLoadFPImm::getLoadFP64Imm(Imm) != -1)
-      return true;
-  }
+  if (isLegalZfaFPImm(Imm, VT))
+    return true;
 
   // Cannot create a 64 bit floating-point immediate value for rv32.
   if (Subtarget.getXLen() < VT.getScalarSizeInBits()) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zfh,+experimental-zfa,+experimental-zvfh,+v -target-abi ilp32d -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv64 -mattr=+zfh,+experimental-zfa,+experimental-zvfh,+v -target-abi lp64d -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+
+define <vscale x 8 x half> @vsplat_f16_0p625() {
+; CHECK-LABEL: vsplat_f16_0p625:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fli.h ft0, 6.250000e-01
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfmv.v.f v8, ft0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> poison, half 0.625, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x half> %splat
+}
+
+define <vscale x 8 x float> @vsplat_f32_0p75() {
+; CHECK-LABEL: vsplat_f32_0p75:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fli.s ft0, 7.500000e-01
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfmv.v.f v8, ft0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> poison, float 0.75, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x float> %splat
+}
+
+define <vscale x 8 x double> @vsplat_f64_neg1() {
+; CHECK-LABEL: vsplat_f64_neg1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fli.d ft0, -1.000000e+00
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfmv.v.f v8, ft0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> poison, double -1.0, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x double> %splat
+}