Index: include/llvm/ADT/Triple.h =================================================================== --- include/llvm/ADT/Triple.h +++ include/llvm/ADT/Triple.h @@ -46,54 +46,55 @@ enum ArchType { UnknownArch, - arm, // ARM (little endian): arm, armv.*, xscale - armeb, // ARM (big endian): armeb - aarch64, // AArch64 (little endian): aarch64 - aarch64_be, // AArch64 (big endian): aarch64_be - arc, // ARC: Synopsys ARC - avr, // AVR: Atmel AVR microcontroller - bpfel, // eBPF or extended BPF or 64-bit BPF (little endian) - bpfeb, // eBPF or extended BPF or 64-bit BPF (big endian) - hexagon, // Hexagon: hexagon - mips, // MIPS: mips, mipsallegrex, mipsr6 - mipsel, // MIPSEL: mipsel, mipsallegrexe, mipsr6el - mips64, // MIPS64: mips64, mips64r6, mipsn32, mipsn32r6 - mips64el, // MIPS64EL: mips64el, mips64r6el, mipsn32el, mipsn32r6el - msp430, // MSP430: msp430 - nios2, // NIOSII: nios2 - ppc, // PPC: powerpc - ppc64, // PPC64: powerpc64, ppu - ppc64le, // PPC64LE: powerpc64le - r600, // R600: AMD GPUs HD2XXX - HD6XXX - amdgcn, // AMDGCN: AMD GCN GPUs - riscv32, // RISC-V (32-bit): riscv32 - riscv64, // RISC-V (64-bit): riscv64 - sparc, // Sparc: sparc - sparcv9, // Sparcv9: Sparcv9 - sparcel, // Sparc: (endianness = little). 
NB: 'Sparcle' is a CPU variant - systemz, // SystemZ: s390x - tce, // TCE (http://tce.cs.tut.fi/): tce - tcele, // TCE little endian (http://tce.cs.tut.fi/): tcele - thumb, // Thumb (little endian): thumb, thumbv.* - thumbeb, // Thumb (big endian): thumbeb - x86, // X86: i[3-9]86 - x86_64, // X86-64: amd64, x86_64 - xcore, // XCore: xcore - nvptx, // NVPTX: 32-bit - nvptx64, // NVPTX: 64-bit - le32, // le32: generic little-endian 32-bit CPU (PNaCl) - le64, // le64: generic little-endian 64-bit CPU (PNaCl) - amdil, // AMDIL - amdil64, // AMDIL with 64-bit pointers - hsail, // AMD HSAIL - hsail64, // AMD HSAIL with 64-bit pointers - spir, // SPIR: standard portable IR for OpenCL 32-bit version - spir64, // SPIR: standard portable IR for OpenCL 64-bit version - kalimba, // Kalimba: generic kalimba - shave, // SHAVE: Movidius vector VLIW processors - lanai, // Lanai: Lanai 32-bit - wasm32, // WebAssembly with 32-bit pointers - wasm64, // WebAssembly with 64-bit pointers + arm, // ARM (little endian): arm, armv.*, xscale + armeb, // ARM (big endian): armeb + aarch64, // AArch64 (little endian): aarch64 + aarch64_be, // AArch64 (big endian): aarch64_be + arc, // ARC: Synopsys ARC + avr, // AVR: Atmel AVR microcontroller + bpfel, // eBPF or extended BPF or 64-bit BPF (little endian) + bpfeb, // eBPF or extended BPF or 64-bit BPF (big endian) + hexagon, // Hexagon: hexagon + mips, // MIPS: mips, mipsallegrex, mipsr6 + mipsel, // MIPSEL: mipsel, mipsallegrexe, mipsr6el + mips64, // MIPS64: mips64, mips64r6, mipsn32, mipsn32r6 + mips64el, // MIPS64EL: mips64el, mips64r6el, mipsn32el, mipsn32r6el + msp430, // MSP430: msp430 + nios2, // NIOSII: nios2 + ppc, // PPC: powerpc + ppc64, // PPC64: powerpc64, ppu + ppc64le, // PPC64LE: powerpc64le + r600, // R600: AMD GPUs HD2XXX - HD6XXX + amdgcn, // AMDGCN: AMD GCN GPUs + riscv32, // RISC-V (32-bit): riscv32 + riscv32e, // RISC-V (Embed): riscv32e + riscv64, // RISC-V (64-bit): riscv64 + sparc, // Sparc: sparc + sparcv9, // 
Sparcv9: Sparcv9 + sparcel, // Sparc: (endianness = little). NB: 'Sparcle' is a CPU variant + systemz, // SystemZ: s390x + tce, // TCE (http://tce.cs.tut.fi/): tce + tcele, // TCE little endian (http://tce.cs.tut.fi/): tcele + thumb, // Thumb (little endian): thumb, thumbv.* + thumbeb, // Thumb (big endian): thumbeb + x86, // X86: i[3-9]86 + x86_64, // X86-64: amd64, x86_64 + xcore, // XCore: xcore + nvptx, // NVPTX: 32-bit + nvptx64, // NVPTX: 64-bit + le32, // le32: generic little-endian 32-bit CPU (PNaCl) + le64, // le64: generic little-endian 64-bit CPU (PNaCl) + amdil, // AMDIL + amdil64, // AMDIL with 64-bit pointers + hsail, // AMD HSAIL + hsail64, // AMD HSAIL with 64-bit pointers + spir, // SPIR: standard portable IR for OpenCL 32-bit version + spir64, // SPIR: standard portable IR for OpenCL 64-bit version + kalimba, // Kalimba: generic kalimba + shave, // SHAVE: Movidius vector VLIW processors + lanai, // Lanai: Lanai 32-bit + wasm32, // WebAssembly with 32-bit pointers + wasm64, // WebAssembly with 64-bit pointers renderscript32, // 32-bit RenderScript renderscript64, // 64-bit RenderScript LastArchType = renderscript64 Index: lib/Support/Triple.cpp =================================================================== --- lib/Support/Triple.cpp +++ lib/Support/Triple.cpp @@ -42,6 +42,8 @@ case r600: return "r600"; case amdgcn: return "amdgcn"; case riscv32: return "riscv32"; + case riscv32e: + return "riscv32e"; case riscv64: return "riscv64"; case sparc: return "sparc"; case sparcv9: return "sparcv9"; @@ -145,6 +147,7 @@ case wasm64: return "wasm"; case riscv32: + case riscv32e: case riscv64: return "riscv"; } } @@ -259,58 +262,59 @@ Triple::ArchType Triple::getArchTypeForLLVMName(StringRef Name) { Triple::ArchType BPFArch(parseBPFArch(Name)); return StringSwitch(Name) - .Case("aarch64", aarch64) - .Case("aarch64_be", aarch64_be) - .Case("arc", arc) - .Case("arm64", aarch64) // "arm64" is an alias for "aarch64" - .Case("arm", arm) - .Case("armeb", armeb) 
- .Case("avr", avr) - .StartsWith("bpf", BPFArch) - .Case("mips", mips) - .Case("mipsel", mipsel) - .Case("mips64", mips64) - .Case("mips64el", mips64el) - .Case("msp430", msp430) - .Case("nios2", nios2) - .Case("ppc64", ppc64) - .Case("ppc32", ppc) - .Case("ppc", ppc) - .Case("ppc64le", ppc64le) - .Case("r600", r600) - .Case("amdgcn", amdgcn) - .Case("riscv32", riscv32) - .Case("riscv64", riscv64) - .Case("hexagon", hexagon) - .Case("sparc", sparc) - .Case("sparcel", sparcel) - .Case("sparcv9", sparcv9) - .Case("systemz", systemz) - .Case("tce", tce) - .Case("tcele", tcele) - .Case("thumb", thumb) - .Case("thumbeb", thumbeb) - .Case("x86", x86) - .Case("x86-64", x86_64) - .Case("xcore", xcore) - .Case("nvptx", nvptx) - .Case("nvptx64", nvptx64) - .Case("le32", le32) - .Case("le64", le64) - .Case("amdil", amdil) - .Case("amdil64", amdil64) - .Case("hsail", hsail) - .Case("hsail64", hsail64) - .Case("spir", spir) - .Case("spir64", spir64) - .Case("kalimba", kalimba) - .Case("lanai", lanai) - .Case("shave", shave) - .Case("wasm32", wasm32) - .Case("wasm64", wasm64) - .Case("renderscript32", renderscript32) - .Case("renderscript64", renderscript64) - .Default(UnknownArch); + .Case("aarch64", aarch64) + .Case("aarch64_be", aarch64_be) + .Case("arc", arc) + .Case("arm64", aarch64) // "arm64" is an alias for "aarch64" + .Case("arm", arm) + .Case("armeb", armeb) + .Case("avr", avr) + .StartsWith("bpf", BPFArch) + .Case("mips", mips) + .Case("mipsel", mipsel) + .Case("mips64", mips64) + .Case("mips64el", mips64el) + .Case("msp430", msp430) + .Case("nios2", nios2) + .Case("ppc64", ppc64) + .Case("ppc32", ppc) + .Case("ppc", ppc) + .Case("ppc64le", ppc64le) + .Case("r600", r600) + .Case("amdgcn", amdgcn) + .Case("riscv32", riscv32) + .Case("riscve", riscv32e) + .Case("riscv64", riscv64) + .Case("hexagon", hexagon) + .Case("sparc", sparc) + .Case("sparcel", sparcel) + .Case("sparcv9", sparcv9) + .Case("systemz", systemz) + .Case("tce", tce) + .Case("tcele", tcele) + 
.Case("thumb", thumb) + .Case("thumbeb", thumbeb) + .Case("x86", x86) + .Case("x86-64", x86_64) + .Case("xcore", xcore) + .Case("nvptx", nvptx) + .Case("nvptx64", nvptx64) + .Case("le32", le32) + .Case("le64", le64) + .Case("amdil", amdil) + .Case("amdil64", amdil64) + .Case("hsail", hsail) + .Case("hsail64", hsail64) + .Case("spir", spir) + .Case("spir64", spir64) + .Case("kalimba", kalimba) + .Case("lanai", lanai) + .Case("shave", shave) + .Case("wasm32", wasm32) + .Case("wasm64", wasm64) + .Case("renderscript32", renderscript32) + .Case("renderscript64", renderscript64) + .Default(UnknownArch); } static Triple::ArchType parseARMArch(StringRef ArchName) { @@ -380,64 +384,65 @@ static Triple::ArchType parseArch(StringRef ArchName) { auto AT = StringSwitch(ArchName) - .Cases("i386", "i486", "i586", "i686", Triple::x86) - // FIXME: Do we need to support these? - .Cases("i786", "i886", "i986", Triple::x86) - .Cases("amd64", "x86_64", "x86_64h", Triple::x86_64) - .Cases("powerpc", "ppc", "ppc32", Triple::ppc) - .Cases("powerpc64", "ppu", "ppc64", Triple::ppc64) - .Cases("powerpc64le", "ppc64le", Triple::ppc64le) - .Case("xscale", Triple::arm) - .Case("xscaleeb", Triple::armeb) - .Case("aarch64", Triple::aarch64) - .Case("aarch64_be", Triple::aarch64_be) - .Case("arc", Triple::arc) - .Case("arm64", Triple::aarch64) - .Case("arm", Triple::arm) - .Case("armeb", Triple::armeb) - .Case("thumb", Triple::thumb) - .Case("thumbeb", Triple::thumbeb) - .Case("avr", Triple::avr) - .Case("msp430", Triple::msp430) - .Cases("mips", "mipseb", "mipsallegrex", "mipsisa32r6", - "mipsr6", Triple::mips) - .Cases("mipsel", "mipsallegrexel", "mipsisa32r6el", "mipsr6el", - Triple::mipsel) - .Cases("mips64", "mips64eb", "mipsn32", "mipsisa64r6", - "mips64r6", "mipsn32r6", Triple::mips64) - .Cases("mips64el", "mipsn32el", "mipsisa64r6el", "mips64r6el", - "mipsn32r6el", Triple::mips64el) - .Case("nios2", Triple::nios2) - .Case("r600", Triple::r600) - .Case("amdgcn", Triple::amdgcn) - 
.Case("riscv32", Triple::riscv32) - .Case("riscv64", Triple::riscv64) - .Case("hexagon", Triple::hexagon) - .Cases("s390x", "systemz", Triple::systemz) - .Case("sparc", Triple::sparc) - .Case("sparcel", Triple::sparcel) - .Cases("sparcv9", "sparc64", Triple::sparcv9) - .Case("tce", Triple::tce) - .Case("tcele", Triple::tcele) - .Case("xcore", Triple::xcore) - .Case("nvptx", Triple::nvptx) - .Case("nvptx64", Triple::nvptx64) - .Case("le32", Triple::le32) - .Case("le64", Triple::le64) - .Case("amdil", Triple::amdil) - .Case("amdil64", Triple::amdil64) - .Case("hsail", Triple::hsail) - .Case("hsail64", Triple::hsail64) - .Case("spir", Triple::spir) - .Case("spir64", Triple::spir64) - .StartsWith("kalimba", Triple::kalimba) - .Case("lanai", Triple::lanai) - .Case("shave", Triple::shave) - .Case("wasm32", Triple::wasm32) - .Case("wasm64", Triple::wasm64) - .Case("renderscript32", Triple::renderscript32) - .Case("renderscript64", Triple::renderscript64) - .Default(Triple::UnknownArch); + .Cases("i386", "i486", "i586", "i686", Triple::x86) + // FIXME: Do we need to support these? 
+ .Cases("i786", "i886", "i986", Triple::x86) + .Cases("amd64", "x86_64", "x86_64h", Triple::x86_64) + .Cases("powerpc", "ppc", "ppc32", Triple::ppc) + .Cases("powerpc64", "ppu", "ppc64", Triple::ppc64) + .Cases("powerpc64le", "ppc64le", Triple::ppc64le) + .Case("xscale", Triple::arm) + .Case("xscaleeb", Triple::armeb) + .Case("aarch64", Triple::aarch64) + .Case("aarch64_be", Triple::aarch64_be) + .Case("arc", Triple::arc) + .Case("arm64", Triple::aarch64) + .Case("arm", Triple::arm) + .Case("armeb", Triple::armeb) + .Case("thumb", Triple::thumb) + .Case("thumbeb", Triple::thumbeb) + .Case("avr", Triple::avr) + .Case("msp430", Triple::msp430) + .Cases("mips", "mipseb", "mipsallegrex", "mipsisa32r6", + "mipsr6", Triple::mips) + .Cases("mipsel", "mipsallegrexel", "mipsisa32r6el", "mipsr6el", + Triple::mipsel) + .Cases("mips64", "mips64eb", "mipsn32", "mipsisa64r6", + "mips64r6", "mipsn32r6", Triple::mips64) + .Cases("mips64el", "mipsn32el", "mipsisa64r6el", "mips64r6el", + "mipsn32r6el", Triple::mips64el) + .Case("nios2", Triple::nios2) + .Case("r600", Triple::r600) + .Case("amdgcn", Triple::amdgcn) + .Case("riscv32", Triple::riscv32) + .Case("riscv32e", Triple::riscv32e) + .Case("riscv64", Triple::riscv64) + .Case("hexagon", Triple::hexagon) + .Cases("s390x", "systemz", Triple::systemz) + .Case("sparc", Triple::sparc) + .Case("sparcel", Triple::sparcel) + .Cases("sparcv9", "sparc64", Triple::sparcv9) + .Case("tce", Triple::tce) + .Case("tcele", Triple::tcele) + .Case("xcore", Triple::xcore) + .Case("nvptx", Triple::nvptx) + .Case("nvptx64", Triple::nvptx64) + .Case("le32", Triple::le32) + .Case("le64", Triple::le64) + .Case("amdil", Triple::amdil) + .Case("amdil64", Triple::amdil64) + .Case("hsail", Triple::hsail) + .Case("hsail64", Triple::hsail64) + .Case("spir", Triple::spir) + .Case("spir64", Triple::spir64) + .StartsWith("kalimba", Triple::kalimba) + .Case("lanai", Triple::lanai) + .Case("shave", Triple::shave) + .Case("wasm32", Triple::wasm32) + 
.Case("wasm64", Triple::wasm64) + .Case("renderscript32", Triple::renderscript32) + .Case("renderscript64", Triple::renderscript64) + .Default(Triple::UnknownArch); // Some architectures require special parsing logic just to compute the // ArchType result. @@ -671,6 +676,7 @@ case Triple::renderscript32: case Triple::renderscript64: case Triple::riscv32: + case Triple::riscv32e: case Triple::riscv64: case Triple::shave: case Triple::sparc: @@ -1226,6 +1232,7 @@ case llvm::Triple::ppc: case llvm::Triple::r600: case llvm::Triple::riscv32: + case llvm::Triple::riscv32e: case llvm::Triple::sparc: case llvm::Triple::sparcel: case llvm::Triple::tce: @@ -1311,6 +1318,7 @@ case Triple::ppc: case Triple::r600: case Triple::riscv32: + case Triple::riscv32e: case Triple::sparc: case Triple::sparcel: case Triple::tce: @@ -1362,6 +1370,7 @@ case Triple::xcore: case Triple::sparcel: case Triple::shave: + case Triple::riscv32e: T.setArch(UnknownArch); break; @@ -1432,6 +1441,7 @@ case Triple::nvptx: case Triple::r600: case Triple::riscv32: + case Triple::riscv32e: case Triple::riscv64: case Triple::shave: case Triple::spir64: @@ -1520,6 +1530,7 @@ case Triple::ppc64le: case Triple::r600: case Triple::riscv32: + case Triple::riscv32e: case Triple::riscv64: case Triple::shave: case Triple::sparcel: Index: lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp =================================================================== --- lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp +++ lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp @@ -1440,4 +1440,5 @@ extern "C" void LLVMInitializeRISCVAsmParser() { RegisterMCAsmParser X(getTheRISCV32Target()); RegisterMCAsmParser Y(getTheRISCV64Target()); + RegisterMCAsmParser Z(getTheRISCV32ETarget()); } Index: lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp =================================================================== --- lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp +++ lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp @@ -54,6 +54,8 @@ 
createRISCVDisassembler); TargetRegistry::RegisterMCDisassembler(getTheRISCV64Target(), createRISCVDisassembler); + TargetRegistry::RegisterMCDisassembler(getTheRISCV32ETarget(), + createRISCVDisassembler); } static const unsigned GPRDecoderTable[] = { Index: lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.h =================================================================== --- lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.h +++ lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.h @@ -35,6 +35,7 @@ Target &getTheRISCV32Target(); Target &getTheRISCV64Target(); +Target &getTheRISCV32ETarget(); MCCodeEmitter *createRISCVMCCodeEmitter(const MCInstrInfo &MCII, const MCRegisterInfo &MRI, Index: lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.cpp =================================================================== --- lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.cpp +++ lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.cpp @@ -56,8 +56,13 @@ static MCSubtargetInfo *createRISCVMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) { std::string CPUName = CPU; - if (CPUName.empty()) - CPUName = TT.isArch64Bit() ? "generic-rv64" : "generic-rv32"; + if (CPUName.empty()) { + if (TT.getArch() == llvm::Triple::riscv32e) { + CPUName = "generic-rv32e"; + } else { + CPUName = TT.isArch64Bit() ? 
"generic-rv64" : "generic-rv32"; + } + } return createRISCVMCSubtargetInfoImpl(TT, CPUName, FS); } @@ -85,7 +90,8 @@ } extern "C" void LLVMInitializeRISCVTargetMC() { - for (Target *T : {&getTheRISCV32Target(), &getTheRISCV64Target()}) { + for (Target *T : {&getTheRISCV32Target(), &getTheRISCV64Target(), + &getTheRISCV32ETarget()}) { TargetRegistry::RegisterMCAsmInfo(*T, createRISCVMCAsmInfo); TargetRegistry::RegisterMCInstrInfo(*T, createRISCVMCInstrInfo); TargetRegistry::RegisterMCRegInfo(*T, createRISCVMCRegisterInfo); Index: lib/Target/RISCV/RISCV.td =================================================================== --- lib/Target/RISCV/RISCV.td +++ lib/Target/RISCV/RISCV.td @@ -47,13 +47,19 @@ def Feature64Bit : SubtargetFeature<"64bit", "HasRV64", "true", "Implements RV64">; +def FeatureEmbed + : SubtargetFeature<"Embed", "HasEmbed", "true", "Implements RV32E">; + def IsRV64 : Predicate<"Subtarget->is64Bit()">, AssemblerPredicate<"Feature64Bit">; def IsRV32 : Predicate<"!Subtarget->is64Bit()">, AssemblerPredicate<"!Feature64Bit">; +def IsRVEmbed : Predicate<"Subtarget->isEmbed()">, + AssemblerPredicate<"FeatureEmbed">; def RV64 : HwMode<"+64bit">; def RV32 : HwMode<"-64bit">; +def RV32E : HwMode<"+Embed">; def FeatureRelax : SubtargetFeature<"relax", "EnableLinkerRelax", "true", @@ -81,6 +87,8 @@ def : ProcessorModel<"generic-rv64", NoSchedModel, [Feature64Bit]>; +def : ProcessorModel<"generic-rv32e", NoSchedModel, [FeatureEmbed]>; + //===----------------------------------------------------------------------===// // Define the RISC-V target. 
//===----------------------------------------------------------------------===// Index: lib/Target/RISCV/RISCVAsmPrinter.cpp =================================================================== --- lib/Target/RISCV/RISCVAsmPrinter.cpp +++ lib/Target/RISCV/RISCVAsmPrinter.cpp @@ -135,4 +135,5 @@ extern "C" void LLVMInitializeRISCVAsmPrinter() { RegisterAsmPrinter X(getTheRISCV32Target()); RegisterAsmPrinter Y(getTheRISCV64Target()); + RegisterAsmPrinter Z(getTheRISCV32ETarget()); } Index: lib/Target/RISCV/RISCVCallingConv.td =================================================================== --- lib/Target/RISCV/RISCVCallingConv.td +++ lib/Target/RISCV/RISCVCallingConv.td @@ -15,6 +15,7 @@ // RISCVISelLowering.cpp (CC_RISCV). def CSR : CalleeSavedRegs<(add X1, X3, X4, X8, X9, (sequence "X%u", 18, 27))>; +def CSR_E : CalleeSavedRegs<(add X1, X3, X4, X8, X9)>; // Needed for implementation of RISCVRegisterInfo::getNoPreservedMask() def CSR_NoRegs : CalleeSavedRegs<(add)>; @@ -27,6 +28,11 @@ (sequence "X%u", 12, 17), (sequence "X%u", 18, 27), (sequence "X%u", 28, 31))>; +def CSR_E_Interrupt : CalleeSavedRegs<(add X1, + (sequence "X%u", 3, 9), + (sequence "X%u", 10, 11), + (sequence "X%u", 12, 15))>; + // Same as CSR_Interrupt, but including all 32-bit FP registers. 
def CSR_XLEN_F32_Interrupt: CalleeSavedRegs<(add X1, Index: lib/Target/RISCV/RISCVFrameLowering.h =================================================================== --- lib/Target/RISCV/RISCVFrameLowering.h +++ lib/Target/RISCV/RISCVFrameLowering.h @@ -21,11 +21,7 @@ class RISCVFrameLowering : public TargetFrameLowering { public: - explicit RISCVFrameLowering(const RISCVSubtarget &STI) - : TargetFrameLowering(StackGrowsDown, - /*StackAlignment=*/16, - /*LocalAreaOffset=*/0), - STI(STI) {} + explicit RISCVFrameLowering(const RISCVSubtarget &STI); void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override; void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override; Index: lib/Target/RISCV/RISCVFrameLowering.cpp =================================================================== --- lib/Target/RISCV/RISCVFrameLowering.cpp +++ lib/Target/RISCV/RISCVFrameLowering.cpp @@ -22,6 +22,12 @@ using namespace llvm; +RISCVFrameLowering::RISCVFrameLowering(const RISCVSubtarget &STI) + : TargetFrameLowering(StackGrowsDown, + STI.isEmbed() ? 4 : /*StackAlignment=*/16, + /*LocalAreaOffset=*/0), + STI(STI) {} + bool RISCVFrameLowering::hasFP(const MachineFunction &MF) const { const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo(); Index: lib/Target/RISCV/RISCVISelLowering.cpp =================================================================== --- lib/Target/RISCV/RISCVISelLowering.cpp +++ lib/Target/RISCV/RISCVISelLowering.cpp @@ -39,10 +39,17 @@ STATISTIC(NumTailCalls, "Number of tail calls"); +static bool isRVE = false; + RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, const RISCVSubtarget &STI) : TargetLowering(TM), Subtarget(STI) { + if (STI.isEmbed()) + isRVE = true; + else + isRVE = false; + MVT XLenVT = Subtarget.getXLenVT(); // Set up the register classes. 
@@ -681,168 +688,330 @@ RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17 }; +static const MCPhysReg EArgGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12, + RISCV::X13, RISCV::X14, RISCV::X15}; + // Pass a 2*XLEN argument that has been split into two XLEN values through // registers or the stack as necessary. static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1, ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, MVT ValVT2, MVT LocVT2, ISD::ArgFlagsTy ArgFlags2) { - unsigned XLenInBytes = XLen / 8; - if (unsigned Reg = State.AllocateReg(ArgGPRs)) { - // At least one half can be passed via register. - State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg, - VA1.getLocVT(), CCValAssign::Full)); - } else { - // Both halves must be passed on the stack, with proper alignment. - unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign()); - State.addLoc( - CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(), - State.AllocateStack(XLenInBytes, StackAlign), - VA1.getLocVT(), CCValAssign::Full)); - State.addLoc(CCValAssign::getMem( - ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2, - CCValAssign::Full)); - return false; - } + if (isRVE) { + unsigned XLenInBytes = XLen / 8; + if (unsigned Reg = State.AllocateReg(EArgGPRs)) { + // At least one half can be passed via register. + State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg, + VA1.getLocVT(), CCValAssign::Full)); + } else { + // Both halves must be passed on the stack, with proper alignment. 
+ unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign()); + State.addLoc( + CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(), + State.AllocateStack(XLenInBytes, StackAlign), + VA1.getLocVT(), CCValAssign::Full)); + State.addLoc(CCValAssign::getMem( + ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2, + CCValAssign::Full)); + return false; + } - if (unsigned Reg = State.AllocateReg(ArgGPRs)) { - // The second half can also be passed via register. - State.addLoc( - CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full)); + if (unsigned Reg = State.AllocateReg(EArgGPRs)) { + // The second half can also be passed via register. + State.addLoc( + CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full)); + } else { + // The second half is passed via the stack, without additional alignment. + State.addLoc(CCValAssign::getMem( + ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2, + CCValAssign::Full)); + } + + return false; } else { - // The second half is passed via the stack, without additional alignment. - State.addLoc(CCValAssign::getMem( - ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2, - CCValAssign::Full)); - } + unsigned XLenInBytes = XLen / 8; + if (unsigned Reg = State.AllocateReg(ArgGPRs)) { + // At least one half can be passed via register. + State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg, + VA1.getLocVT(), CCValAssign::Full)); + } else { + // Both halves must be passed on the stack, with proper alignment. 
+ unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign()); + State.addLoc( + CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(), + State.AllocateStack(XLenInBytes, StackAlign), + VA1.getLocVT(), CCValAssign::Full)); + State.addLoc(CCValAssign::getMem( + ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2, + CCValAssign::Full)); + return false; + } + + if (unsigned Reg = State.AllocateReg(ArgGPRs)) { + // The second half can also be passed via register. + State.addLoc( + CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full)); + } else { + // The second half is passed via the stack, without additional alignment. + State.addLoc(CCValAssign::getMem( + ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2, + CCValAssign::Full)); + } - return false; + return false; + } } // Implements the RISC-V calling convention. Returns true upon failure. static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, bool IsRet, Type *OrigTy) { - unsigned XLen = DL.getLargestLegalIntTypeSizeInBits(); - assert(XLen == 32 || XLen == 64); - MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64; - if (ValVT == MVT::f32) { - LocVT = MVT::i32; - LocInfo = CCValAssign::BCvt; - } + if (isRVE) { + unsigned XLen = DL.getLargestLegalIntTypeSizeInBits(); + assert(XLen == 32 || XLen == 64); + MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64; + if (ValVT == MVT::f32) { + LocVT = MVT::i32; + LocInfo = CCValAssign::BCvt; + } - // Any return value split in to more than two values can't be returned - // directly. - if (IsRet && ValNo > 1) - return true; + // Any return value split in to more than two values can't be returned + // directly. 
+ if (IsRet && ValNo > 1) + return true; - // If this is a variadic argument, the RISC-V calling convention requires - // that it is assigned an 'even' or 'aligned' register if it has 8-byte - // alignment (RV32) or 16-byte alignment (RV64). An aligned register should - // be used regardless of whether the original argument was split during - // legalisation or not. The argument will not be passed by registers if the - // original type is larger than 2*XLEN, so the register alignment rule does - // not apply. - unsigned TwoXLenInBytes = (2 * XLen) / 8; - if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes && - DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) { - unsigned RegIdx = State.getFirstUnallocated(ArgGPRs); - // Skip 'odd' register if necessary. - if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1) - State.AllocateReg(ArgGPRs); - } + // If this is a variadic argument, the RISC-V calling convention requires + // that it is assigned an 'even' or 'aligned' register if it has 8-byte + // alignment (RV32) or 16-byte alignment (RV64). An aligned register should + // be used regardless of whether the original argument was split during + // legalisation or not. The argument will not be passed by registers if the + // original type is larger than 2*XLEN, so the register alignment rule does + // not apply. + unsigned TwoXLenInBytes = (2 * XLen) / 8; + if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes && + DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) { + unsigned RegIdx = State.getFirstUnallocated(EArgGPRs); + // Skip 'odd' register if necessary. + if (RegIdx != array_lengthof(EArgGPRs) && RegIdx % 2 == 1) + State.AllocateReg(EArgGPRs); + } - SmallVectorImpl &PendingLocs = State.getPendingLocs(); - SmallVectorImpl &PendingArgFlags = - State.getPendingArgFlags(); - - assert(PendingLocs.size() == PendingArgFlags.size() && - "PendingLocs and PendingArgFlags out of sync"); - - // Handle passing f64 on RV32D with a soft float ABI. 
- if (XLen == 32 && ValVT == MVT::f64) { - assert(!ArgFlags.isSplit() && PendingLocs.empty() && - "Can't lower f64 if it is split"); - // Depending on available argument GPRS, f64 may be passed in a pair of - // GPRs, split between a GPR and the stack, or passed completely on the - // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these - // cases. - unsigned Reg = State.AllocateReg(ArgGPRs); - LocVT = MVT::i32; - if (!Reg) { - unsigned StackOffset = State.AllocateStack(8, 8); - State.addLoc( - CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); + SmallVectorImpl &PendingLocs = State.getPendingLocs(); + SmallVectorImpl &PendingArgFlags = + State.getPendingArgFlags(); + + assert(PendingLocs.size() == PendingArgFlags.size() && + "PendingLocs and PendingArgFlags out of sync"); + + // Handle passing f64 on RV32D with a soft float ABI. + if (XLen == 32 && ValVT == MVT::f64) { + assert(!ArgFlags.isSplit() && PendingLocs.empty() && + "Can't lower f64 if it is split"); + // Depending on available argument GPRS, f64 may be passed in a pair of + // GPRs, split between a GPR and the stack, or passed completely on the + // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these + // cases. + unsigned Reg = State.AllocateReg(EArgGPRs); + LocVT = MVT::i32; + if (!Reg) { + unsigned StackOffset = State.AllocateStack(8, 8); + State.addLoc( + CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); + return false; + } + if (!State.AllocateReg(EArgGPRs)) + State.AllocateStack(4, 4); + State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); return false; } - if (!State.AllocateReg(ArgGPRs)) - State.AllocateStack(4, 4); - State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); - return false; - } - // Split arguments might be passed indirectly, so keep track of the pending - // values. 
- if (ArgFlags.isSplit() || !PendingLocs.empty()) { - LocVT = XLenVT; - LocInfo = CCValAssign::Indirect; - PendingLocs.push_back( - CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo)); - PendingArgFlags.push_back(ArgFlags); - if (!ArgFlags.isSplitEnd()) { + // Split arguments might be passed indirectly, so keep track of the pending + // values. + if (ArgFlags.isSplit() || !PendingLocs.empty()) { + LocVT = XLenVT; + LocInfo = CCValAssign::Indirect; + PendingLocs.push_back( + CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo)); + PendingArgFlags.push_back(ArgFlags); + if (!ArgFlags.isSplitEnd()) { + return false; + } + } + + // If the split argument only had two elements, it should be passed directly + // in registers or on the stack. + if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) { + assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()"); + // Apply the normal calling convention rules to the first half of the + // split argument. + CCValAssign VA = PendingLocs[0]; + ISD::ArgFlagsTy AF = PendingArgFlags[0]; + PendingLocs.clear(); + PendingArgFlags.clear(); + return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT, + ArgFlags); + } + + // Allocate to a register if possible, or else a stack slot. + unsigned Reg = State.AllocateReg(EArgGPRs); + unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8); + + // If we reach this point and PendingLocs is non-empty, we must be at the + // end of a split argument that must be passed indirectly. + if (!PendingLocs.empty()) { + assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()"); + assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()"); + + for (auto &It : PendingLocs) { + if (Reg) + It.convertToReg(Reg); + else + It.convertToMem(StackOffset); + State.addLoc(It); + } + PendingLocs.clear(); + PendingArgFlags.clear(); return false; } - } - // If the split argument only had two elements, it should be passed directly - // in registers or on the stack. 
- if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) { - assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()"); - // Apply the normal calling convention rules to the first half of the - // split argument. - CCValAssign VA = PendingLocs[0]; - ISD::ArgFlagsTy AF = PendingArgFlags[0]; - PendingLocs.clear(); - PendingArgFlags.clear(); - return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT, - ArgFlags); - } + assert(LocVT == XLenVT && "Expected an XLenVT at this stage"); + + if (Reg) { + State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); + return false; + } - // Allocate to a register if possible, or else a stack slot. - unsigned Reg = State.AllocateReg(ArgGPRs); - unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8); - - // If we reach this point and PendingLocs is non-empty, we must be at the - // end of a split argument that must be passed indirectly. - if (!PendingLocs.empty()) { - assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()"); - assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()"); - - for (auto &It : PendingLocs) { - if (Reg) - It.convertToReg(Reg); - else - It.convertToMem(StackOffset); - State.addLoc(It); + if (ValVT == MVT::f32) { + LocVT = MVT::f32; + LocInfo = CCValAssign::Full; } - PendingLocs.clear(); - PendingArgFlags.clear(); + State.addLoc( + CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); return false; - } - assert(LocVT == XLenVT && "Expected an XLenVT at this stage"); + } else { + unsigned XLen = DL.getLargestLegalIntTypeSizeInBits(); + assert(XLen == 32 || XLen == 64); + MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64; + if (ValVT == MVT::f32) { + LocVT = MVT::i32; + LocInfo = CCValAssign::BCvt; + } - if (Reg) { - State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); - return false; - } + // Any return value split in to more than two values can't be returned + // directly. 
+ if (IsRet && ValNo > 1) + return true; + + // If this is a variadic argument, the RISC-V calling convention requires + // that it is assigned an 'even' or 'aligned' register if it has 8-byte + // alignment (RV32) or 16-byte alignment (RV64). An aligned register should + // be used regardless of whether the original argument was split during + // legalisation or not. The argument will not be passed by registers if the + // original type is larger than 2*XLEN, so the register alignment rule does + // not apply. + unsigned TwoXLenInBytes = (2 * XLen) / 8; + if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes && + DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) { + unsigned RegIdx = State.getFirstUnallocated(ArgGPRs); + // Skip 'odd' register if necessary. + if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1) + State.AllocateReg(ArgGPRs); + } + + SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs(); + SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags = + State.getPendingArgFlags(); + + assert(PendingLocs.size() == PendingArgFlags.size() && + "PendingLocs and PendingArgFlags out of sync"); + + // Handle passing f64 on RV32D with a soft float ABI. + if (XLen == 32 && ValVT == MVT::f64) { + assert(!ArgFlags.isSplit() && PendingLocs.empty() && + "Can't lower f64 if it is split"); + // Depending on available argument GPRS, f64 may be passed in a pair of + // GPRs, split between a GPR and the stack, or passed completely on the + // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these + // cases.
+ unsigned Reg = State.AllocateReg(ArgGPRs); + LocVT = MVT::i32; + if (!Reg) { + unsigned StackOffset = State.AllocateStack(8, 8); + State.addLoc( + CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); + return false; + } + if (!State.AllocateReg(ArgGPRs)) + State.AllocateStack(4, 4); + State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); + return false; + } + + // Split arguments might be passed indirectly, so keep track of the pending + // values. + if (ArgFlags.isSplit() || !PendingLocs.empty()) { + LocVT = XLenVT; + LocInfo = CCValAssign::Indirect; + PendingLocs.push_back( + CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo)); + PendingArgFlags.push_back(ArgFlags); + if (!ArgFlags.isSplitEnd()) { + return false; + } + } + + // If the split argument only had two elements, it should be passed directly + // in registers or on the stack. + if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) { + assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()"); + // Apply the normal calling convention rules to the first half of the + // split argument. + CCValAssign VA = PendingLocs[0]; + ISD::ArgFlagsTy AF = PendingArgFlags[0]; + PendingLocs.clear(); + PendingArgFlags.clear(); + return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT, + ArgFlags); + } + + // Allocate to a register if possible, or else a stack slot. + unsigned Reg = State.AllocateReg(ArgGPRs); + unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8); + + // If we reach this point and PendingLocs is non-empty, we must be at the + // end of a split argument that must be passed indirectly. 
+ if (!PendingLocs.empty()) { + assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()"); + assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()"); + + for (auto &It : PendingLocs) { + if (Reg) + It.convertToReg(Reg); + else + It.convertToMem(StackOffset); + State.addLoc(It); + } + PendingLocs.clear(); + PendingArgFlags.clear(); + return false; + } + + assert(LocVT == XLenVT && "Expected an XLenVT at this stage"); + + if (Reg) { + State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); + return false; + } - if (ValVT == MVT::f32) { - LocVT = MVT::f32; - LocInfo = CCValAssign::Full; + if (ValVT == MVT::f32) { + LocVT = MVT::f32; + LocInfo = CCValAssign::Full; + } + State.addLoc( + CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); + return false; + } - State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); - return false; - } } void RISCVTargetLowering::analyzeInputArgs( Index: lib/Target/RISCV/RISCVRegisterInfo.cpp =================================================================== --- lib/Target/RISCV/RISCVRegisterInfo.cpp +++ lib/Target/RISCV/RISCVRegisterInfo.cpp @@ -34,12 +34,16 @@ const MCPhysReg * RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { if (MF->getFunction().hasFnAttribute("interrupt")) { + if (MF->getSubtarget<RISCVSubtarget>().isEmbed()) + return CSR_E_Interrupt_SaveList; if (MF->getSubtarget<RISCVSubtarget>().hasStdExtD()) return CSR_XLEN_F64_Interrupt_SaveList; if (MF->getSubtarget<RISCVSubtarget>().hasStdExtF()) return CSR_XLEN_F32_Interrupt_SaveList; return CSR_Interrupt_SaveList; } + if (MF->getSubtarget<RISCVSubtarget>().isEmbed()) + return CSR_E_SaveList; return CSR_SaveList; } @@ -53,6 +57,26 @@ markSuperRegs(Reserved, RISCV::X3); // gp markSuperRegs(Reserved, RISCV::X4); // tp markSuperRegs(Reserved, RISCV::X8); // fp + + // RV32E provides only x0-x15, so reserve x16-x31. + if (MF.getSubtarget<RISCVSubtarget>().isEmbed()) { + markSuperRegs(Reserved, RISCV::X16); + markSuperRegs(Reserved, RISCV::X17); + markSuperRegs(Reserved, RISCV::X18); +
markSuperRegs(Reserved, RISCV::X19); + markSuperRegs(Reserved, RISCV::X20); + markSuperRegs(Reserved, RISCV::X21); + markSuperRegs(Reserved, RISCV::X22); + markSuperRegs(Reserved, RISCV::X23); + markSuperRegs(Reserved, RISCV::X24); + markSuperRegs(Reserved, RISCV::X25); + markSuperRegs(Reserved, RISCV::X26); + markSuperRegs(Reserved, RISCV::X27); + markSuperRegs(Reserved, RISCV::X28); + markSuperRegs(Reserved, RISCV::X29); + markSuperRegs(Reserved, RISCV::X30); + markSuperRegs(Reserved, RISCV::X31); + } assert(checkAllSuperRegsMarked(Reserved)); return Reserved; } @@ -118,11 +142,15 @@ RISCVRegisterInfo::getCallPreservedMask(const MachineFunction & MF, CallingConv::ID /*CC*/) const { if (MF.getFunction().hasFnAttribute("interrupt")) { + if (MF.getSubtarget<RISCVSubtarget>().isEmbed()) + return CSR_E_Interrupt_RegMask; if (MF.getSubtarget<RISCVSubtarget>().hasStdExtD()) return CSR_XLEN_F64_Interrupt_RegMask; if (MF.getSubtarget<RISCVSubtarget>().hasStdExtF()) return CSR_XLEN_F32_Interrupt_RegMask; return CSR_Interrupt_RegMask; } + if (MF.getSubtarget<RISCVSubtarget>().isEmbed()) + return CSR_E_RegMask; return CSR_RegMask; } Index: lib/Target/RISCV/RISCVRegisterInfo.td =================================================================== --- lib/Target/RISCV/RISCVRegisterInfo.td +++ lib/Target/RISCV/RISCVRegisterInfo.td @@ -84,8 +84,9 @@ } } -def XLenVT : ValueTypeByHwMode<[RV32, RV64, DefaultMode], - [i32, i64, i32]>; +def XLenVT : ValueTypeByHwMode<[RV32, RV64, RV32E, DefaultMode], + [i32, i64, i32, i32]>; + // The order of registers represents the preferred allocation sequence. // Registers are listed in the order caller-save, callee-save, specials.
@@ -113,8 +114,8 @@ (sequence "X%u", 1, 4) )> { let RegInfos = RegInfoByHwMode< - [RV32, RV64, DefaultMode], - [RegInfo<32,32,32>, RegInfo<64,64,64>, RegInfo<32,32,32>]>; + [RV32, RV64, RV32E, DefaultMode], + [RegInfo<32,32,32>, RegInfo<64,64,64>, RegInfo<32,32,32>, RegInfo<32,32,32>]>; } def GPRNoX0X2 : RegisterClass<"RISCV", [XLenVT], 32, (add Index: lib/Target/RISCV/RISCVSubtarget.h =================================================================== --- lib/Target/RISCV/RISCVSubtarget.h +++ lib/Target/RISCV/RISCVSubtarget.h @@ -36,6 +36,7 @@ bool HasStdExtD = false; bool HasStdExtC = false; bool HasRV64 = false; + bool HasEmbed = false; bool EnableLinkerRelax = false; unsigned XLen = 32; MVT XLenVT = MVT::i32; @@ -48,7 +49,7 @@ /// Initializes using the passed in CPU and feature strings so that we can /// use initializer lists for subtarget initialization. RISCVSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS, - bool Is64Bit); + bool Is64Bit, bool IsEmbed); public: // Initializes the data members to match that of the specified triple. @@ -78,6 +79,7 @@ bool hasStdExtD() const { return HasStdExtD; } bool hasStdExtC() const { return HasStdExtC; } bool is64Bit() const { return HasRV64; } + bool isEmbed() const { return HasEmbed; } bool enableLinkerRelax() const { return EnableLinkerRelax; } MVT getXLenVT() const { return XLenVT; } unsigned getXLen() const { return XLen; } Index: lib/Target/RISCV/RISCVSubtarget.cpp =================================================================== --- lib/Target/RISCV/RISCVSubtarget.cpp +++ lib/Target/RISCV/RISCVSubtarget.cpp @@ -28,11 +28,17 @@ RISCVSubtarget &RISCVSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS, - bool Is64Bit) { + bool Is64Bit, + bool IsEmbed) { // Determine default and user-specified characteristics std::string CPUName = CPU; - if (CPUName.empty()) - CPUName = Is64Bit ? 
"generic-rv64" : "generic-rv32"; + if (CPUName.empty()) { + if (IsEmbed) { + CPUName = "generic-rv32e"; + } else { + CPUName = Is64Bit ? "generic-rv64" : "generic-rv32"; + } + } ParseSubtargetFeatures(CPUName, FS); if (Is64Bit) { XLenVT = MVT::i64; @@ -44,5 +50,6 @@ RISCVSubtarget::RISCVSubtarget(const Triple &TT, const std::string &CPU, const std::string &FS, const TargetMachine &TM) : RISCVGenSubtargetInfo(TT, CPU, FS), - FrameLowering(initializeSubtargetDependencies(CPU, FS, TT.isArch64Bit())), + FrameLowering(initializeSubtargetDependencies( + CPU, FS, TT.isArch64Bit(), TT.getArch() == llvm::Triple::riscv32e)), InstrInfo(), RegInfo(getHwMode()), TLInfo(TM, *this) {} Index: lib/Target/RISCV/RISCVTargetMachine.cpp =================================================================== --- lib/Target/RISCV/RISCVTargetMachine.cpp +++ lib/Target/RISCV/RISCVTargetMachine.cpp @@ -27,6 +27,7 @@ extern "C" void LLVMInitializeRISCVTarget() { RegisterTargetMachine<RISCVTargetMachine> X(getTheRISCV32Target()); RegisterTargetMachine<RISCVTargetMachine> Y(getTheRISCV64Target()); + RegisterTargetMachine<RISCVTargetMachine> Z(getTheRISCV32ETarget()); auto PR = PassRegistry::getPassRegistry(); initializeRISCVExpandPseudoPass(*PR); } Index: lib/Target/RISCV/TargetInfo/RISCVTargetInfo.cpp =================================================================== --- lib/Target/RISCV/TargetInfo/RISCVTargetInfo.cpp +++ lib/Target/RISCV/TargetInfo/RISCVTargetInfo.cpp @@ -20,6 +20,11 @@ static Target TheRISCV64Target; return TheRISCV64Target; } + +Target &getTheRISCV32ETarget() { + static Target TheRISCV32ETarget; + return TheRISCV32ETarget; +} } extern "C" void LLVMInitializeRISCVTargetInfo() { @@ -27,4 +32,6 @@ "32-bit RISC-V", "RISCV"); RegisterTarget<Triple::riscv64> Y(getTheRISCV64Target(), "riscv64", "64-bit RISC-V", "RISCV"); + RegisterTarget<Triple::riscv32e> Z(getTheRISCV32ETarget(), "riscv32e", + "32-bit RISC-V embed", "RISCV"); }