diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -664,6 +664,8 @@
   SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
 
+  SDValue lowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
+
   SDValue expandUnalignedRVVLoad(SDValue Op, SelectionDAG &DAG) const;
   SDValue expandUnalignedRVVStore(SDValue Op, SelectionDAG &DAG) const;
 
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -187,10 +187,15 @@
   setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other,
                      Expand);
   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+
+  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
+
   if (!Subtarget.hasStdExtZbb())
     setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i8, MVT::i16}, Expand);
 
   if (Subtarget.is64Bit()) {
+    setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
+
     setOperationAction({ISD::ADD, ISD::SUB, ISD::SHL, ISD::SRA, ISD::SRL},
                        MVT::i32, Custom);
 
@@ -3422,6 +3427,8 @@
     return lowerGET_ROUNDING(Op, DAG);
   case ISD::SET_ROUNDING:
     return lowerSET_ROUNDING(Op, DAG);
+  case ISD::EH_DWARF_CFA:
+    return lowerEH_DWARF_CFA(Op, DAG);
   case ISD::VP_SELECT:
     return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
   case ISD::VP_MERGE:
@@ -6602,6 +6609,17 @@
                             RMValue);
 }
 
+SDValue RISCVTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
+                                               SelectionDAG &DAG) const {
+  MachineFunction &MF = DAG.getMachineFunction();
+
+  // The CFA is the incoming stack pointer value; materialize it as a fixed
+  // stack object of one XLEN-sized slot at offset 0 from the CFA.
+  int FI = MF.getFrameInfo().CreateFixedObject(Subtarget.getXLen() / 8, 0,
+                                               /*IsImmutable=*/false);
+  return DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
+}
+
 static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
   switch (IntNo) {
   default:
diff --git a/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll b/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll
@@ -0,0 +1,37 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 < %s | FileCheck -check-prefix=RV32 %s
+; RUN: llc -mtriple=riscv64 < %s | FileCheck -check-prefix=RV64 %s
+
+define void @dwarf() {
+; RV32-LABEL: dwarf:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    call foo@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: dwarf:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    addi a0, sp, 16
+; RV64-NEXT:    call foo@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+entry:
+  %0 = call i8* @llvm.eh.dwarf.cfa(i32 0)
+  call void @foo(i8* %0)
+  ret void
+}
+
+declare void @foo(i8*)
+
+declare i8* @llvm.eh.dwarf.cfa(i32) nounwind