Index: llvm/lib/Target/RISCV/RISCV.td
===================================================================
--- llvm/lib/Target/RISCV/RISCV.td
+++ llvm/lib/Target/RISCV/RISCV.td
@@ -165,6 +165,10 @@
 def IsRV32E : Predicate<"Subtarget->isRV32E()">,
                         AssemblerPredicate<(all_of FeatureRV32E)>;
 
+def FeatureNoFdiv
+    : SubtargetFeature<"no-fdiv", "NoFdiv", "true",
+                       "Disable hardware FP divide and sqrt instructions">;
+
 def FeatureRelax
     : SubtargetFeature<"relax", "EnableLinkerRelax", "true",
                        "Enable Linker relaxation.">;
Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -65,6 +65,10 @@
     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
   }
 
+  if (Subtarget.noFdiv() && !(Subtarget.hasStdExtF() || Subtarget.hasStdExtD()))
+    errs() << "Machine attribute no-fdiv requires the F or D extension for FP "
+              "registers.\n";
+
   switch (ABI) {
   default:
     report_fatal_error("Don't know how to lower this ABI");
@@ -178,6 +182,11 @@
     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
   }
 
+  if (Subtarget.hasStdExtF() && Subtarget.noFdiv()) {
+    setOperationAction(ISD::FDIV, MVT::f32, Expand);
+    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
+  }
+
   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
 
@@ -205,6 +214,11 @@
     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
   }
 
+  if (Subtarget.hasStdExtD() && Subtarget.noFdiv()) {
+    setOperationAction(ISD::FDIV, MVT::f64, Expand);
+    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
+  }
+
   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
Index: llvm/lib/Target/RISCV/RISCVSubtarget.h
===================================================================
--- llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -55,6 +55,7 @@
   bool EnableLinkerRelax = false;
   bool EnableRVCHintInstrs = false;
   bool EnableSaveRestore = false;
+  bool NoFdiv = false;
   unsigned XLen = 32;
   MVT XLenVT = MVT::i32;
   RISCVABI::ABI TargetABI = RISCVABI::ABI_Unknown;
@@ -115,6 +116,7 @@
   bool enableLinkerRelax() const { return EnableLinkerRelax; }
   bool enableRVCHintInstrs() const { return EnableRVCHintInstrs; }
   bool enableSaveRestore() const { return EnableSaveRestore; }
+  bool noFdiv() const { return NoFdiv; }
   MVT getXLenVT() const { return XLenVT; }
   unsigned getXLen() const { return XLen; }
   RISCVABI::ABI getTargetABI() const { return TargetABI; }
Index: llvm/test/CodeGen/RISCV/no-fdiv-attr.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/no-fdiv-attr.ll
@@ -0,0 +1,173 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+f,+no-fdiv -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32IF %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+no-fdiv -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+f,+no-fdiv -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IF %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+no-fdiv -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
+
+define float @fdiv_s(float %a, float %b) nounwind {
+; RV32IF-LABEL: fdiv_s:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    call __divsf3
+; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV32IFD-LABEL: fdiv_s:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    call __divsf3
+; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IF-LABEL: fdiv_s:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    addi sp, sp, -16
+; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    call __divsf3
+; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    ret
+;
+; RV64IFD-LABEL: fdiv_s:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call __divsf3
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+  %1 = fdiv float %a, %b
+  ret float %1
+}
+
+declare float @llvm.sqrt.f32(float)
+
+define float @fsqrt_s(float %a) nounwind {
+; RV32IF-LABEL: fsqrt_s:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    call sqrtf
+; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV32IFD-LABEL: fsqrt_s:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    call sqrtf
+; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IF-LABEL: fsqrt_s:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    addi sp, sp, -16
+; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    call sqrtf
+; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    ret
+;
+; RV64IFD-LABEL: fsqrt_s:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call sqrtf
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+  %1 = call float @llvm.sqrt.f32(float %a)
+  ret float %1
+}
+
+define double @fdiv_d(double %a, double %b) nounwind {
+; RV32IF-LABEL: fdiv_d:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    call __divdf3
+; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV32IFD-LABEL: fdiv_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    call __divdf3
+; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IF-LABEL: fdiv_d:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    addi sp, sp, -16
+; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    call __divdf3
+; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    ret
+;
+; RV64IFD-LABEL: fdiv_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call __divdf3
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+  %1 = fdiv double %a, %b
+  ret double %1
+}
+
+declare double @llvm.sqrt.f64(double)
+
+define double @fsqrt_d(double %a) nounwind {
+; RV32IF-LABEL: fsqrt_d:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    sw ra, 12(sp)
+; RV32IF-NEXT:    call sqrt
+; RV32IF-NEXT:    lw ra, 12(sp)
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    ret
+;
+; RV32IFD-LABEL: fsqrt_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    call sqrt
+; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IF-LABEL: fsqrt_d:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    addi sp, sp, -16
+; RV64IF-NEXT:    sd ra, 8(sp)
+; RV64IF-NEXT:    call sqrt
+; RV64IF-NEXT:    ld ra, 8(sp)
+; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    ret
+;
+; RV64IFD-LABEL: fsqrt_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call sqrt
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+  %1 = call double @llvm.sqrt.f64(double %a)
+  ret double %1
+}