diff --git a/clang/lib/Driver/SanitizerArgs.cpp b/clang/lib/Driver/SanitizerArgs.cpp
--- a/clang/lib/Driver/SanitizerArgs.cpp
+++ b/clang/lib/Driver/SanitizerArgs.cpp
@@ -547,11 +547,11 @@
       ((TC.getTriple().isAArch64() &&
         !llvm::AArch64::isX18ReservedByDefault(TC.getTriple())) ||
        (TC.getTriple().isRISCV() &&
-        !llvm::RISCV::isX18ReservedByDefault(TC.getTriple()))) &&
-      !Args.hasArg(options::OPT_ffixed_x18) && DiagnoseErrors) {
+        !llvm::RISCV::isX27ReservedByDefault(TC.getTriple()))) &&
+      !Args.hasArg(options::OPT_ffixed_x27) && DiagnoseErrors) {
     D.Diag(diag::err_drv_argument_only_allowed_with)
         << lastArgumentForMask(D, Args, Kinds & SanitizerKind::ShadowCallStack)
-        << "-ffixed-x18";
+        << (TC.getTriple().isAArch64() ? "-ffixed-x18" : "-ffixed-x27");
   }
 
   // Report error if there are non-trapping sanitizers that require
diff --git a/compiler-rt/test/shadowcallstack/lit.cfg.py b/compiler-rt/test/shadowcallstack/lit.cfg.py
--- a/compiler-rt/test/shadowcallstack/lit.cfg.py
+++ b/compiler-rt/test/shadowcallstack/lit.cfg.py
@@ -17,7 +17,9 @@
 scs_arch_cflags = config.target_cflags
 if config.target_arch == 'aarch64':
   scs_arch_cflags += ' -ffixed-x18 '
+elif config.target_arch == 'riscv64':
+  scs_arch_cflags += ' -ffixed-x27 '
 config.substitutions.append(
     ("%clang_scs ", config.clang + ' -O0 -fsanitize=shadow-call-stack ' + scs_arch_cflags + ' ') )
 
-if config.host_os not in ['Linux'] or config.target_arch not in ['aarch64']:
+if config.host_os not in ['Linux'] or config.target_arch not in ['aarch64', 'riscv64']:
   config.unsupported = True
diff --git a/llvm/include/llvm/TargetParser/RISCVTargetParser.h b/llvm/include/llvm/TargetParser/RISCVTargetParser.h
--- a/llvm/include/llvm/TargetParser/RISCVTargetParser.h
+++ b/llvm/include/llvm/TargetParser/RISCVTargetParser.h
@@ -41,7 +41,7 @@
 void fillValidTuneCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64);
 bool getCPUFeaturesExceptStdExt(CPUKind Kind, std::vector<StringRef> &Features);
 
-bool isX18ReservedByDefault(const Triple &TT);
+bool isX27ReservedByDefault(const Triple &TT);
 
 } // namespace RISCV
 } // namespace llvm
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
@@ -90,7 +90,7 @@
 MCRegister getBPReg() { return RISCV::X9; }
 
 // Returns the register holding shadow call stack pointer.
-MCRegister getSCSPReg() { return RISCV::X18; }
+MCRegister getSCSPReg() { return RISCV::X27; }
 
 } // namespace RISCVABI
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -27,8 +27,8 @@
 using namespace llvm;
 
-// For now we use x18, a.k.a s2, as pointer to shadow call stack.
-// User should explicitly set -ffixed-x18 and not use x18 in their asm.
+// For now we use x27, a.k.a. s11, as the pointer to the shadow call stack.
+// Users should explicitly set -ffixed-x27 and not use x27 in their asm.
 static void emitSCSPrologue(MachineFunction &MF, MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI,
                             const DebugLoc &DL) {
@@ -51,10 +51,11 @@
   auto &Ctx = MF.getFunction().getContext();
   if (!STI.isRegisterReservedByUser(SCSPReg)) {
     Ctx.diagnose(DiagnosticInfoUnsupported{
-        MF.getFunction(), "x18 not reserved by user for Shadow Call Stack."});
+        MF.getFunction(), "x27 not reserved by user for Shadow Call Stack."});
     return;
   }
 
+  // FIXME: Using x27 for SCS should remove this restriction.
   const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
   if (RVFI->useSaveRestoreLibCalls(MF)) {
     Ctx.diagnose(DiagnosticInfoUnsupported{
@@ -67,8 +68,8 @@
   bool IsRV64 = STI.hasFeature(RISCV::Feature64Bit);
   int64_t SlotSize = STI.getXLen() / 8;
   // Store return address to shadow call stack
-  // s[w|d]  ra, 0(s2)
-  // addi    s2, s2, [4|8]
+  // s[w|d]  ra, 0(s11)
+  // addi    s11, s11, [4|8]
   BuildMI(MBB, MI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
       .addReg(RAReg)
       .addReg(SCSPReg)
@@ -83,7 +84,7 @@
   // Emit a CFI instruction that causes SlotSize to be subtracted from the value
   // of the shadow stack pointer when unwinding past this frame.
   char DwarfSCSReg = TRI->getDwarfRegNum(SCSPReg, /*IsEH*/ true);
-  assert(DwarfSCSReg < 32 && "SCS Register should be < 32 (X18).");
+  assert(DwarfSCSReg < 32 && "SCS Register should be < 32 (X27).");
 
   char Offset = static_cast<char>(-SlotSize) & 0x7f;
   const char CFIInst[] = {
@@ -121,7 +122,7 @@
   auto &Ctx = MF.getFunction().getContext();
   if (!STI.isRegisterReservedByUser(SCSPReg)) {
     Ctx.diagnose(DiagnosticInfoUnsupported{
-        MF.getFunction(), "x18 not reserved by user for Shadow Call Stack."});
+        MF.getFunction(), "x27 not reserved by user for Shadow Call Stack."});
     return;
   }
 
@@ -137,8 +138,8 @@
   bool IsRV64 = STI.hasFeature(RISCV::Feature64Bit);
   int64_t SlotSize = STI.getXLen() / 8;
   // Load return address from shadow call stack
-  // l[w|d]  ra, -[4|8](s2)
-  // addi    s2, s2, -[4|8]
+  // l[w|d]  ra, -[4|8](s11)
+  // addi    s11, s11, -[4|8]
   BuildMI(MBB, MI, DL, TII->get(IsRV64 ? RISCV::LD : RISCV::LW))
       .addReg(RAReg, RegState::Define)
       .addReg(SCSPReg)
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
@@ -83,8 +83,8 @@
       FrameLowering(
           initializeSubtargetDependencies(TT, CPU, TuneCPU, FS, ABIName)),
       InstrInfo(*this), RegInfo(getHwMode()), TLInfo(TM, *this) {
-  if (RISCV::isX18ReservedByDefault(TT))
-    UserReservedRegister.set(RISCV::X18);
+  if (RISCV::isX27ReservedByDefault(TT))
+    UserReservedRegister.set(RISCV::X27);
 
   CallLoweringInfo.reset(new RISCVCallLowering(*getTargetLowering()));
   Legalizer.reset(new RISCVLegalizerInfo(*this));
diff --git a/llvm/lib/TargetParser/RISCVTargetParser.cpp b/llvm/lib/TargetParser/RISCVTargetParser.cpp
--- a/llvm/lib/TargetParser/RISCVTargetParser.cpp
+++ b/llvm/lib/TargetParser/RISCVTargetParser.cpp
@@ -101,8 +101,8 @@
   return true;
 }
 
-bool isX18ReservedByDefault(const Triple &TT) {
-  // X18 is reserved for the ShadowCallStack ABI (even when not enabled).
+bool isX27ReservedByDefault(const Triple &TT) {
+  // X27 is reserved for the ShadowCallStack ABI (even when not enabled).
   return TT.isOSFuchsia() || TT.isAndroid();
 }
diff --git a/llvm/test/CodeGen/RISCV/reserved-regs.ll b/llvm/test/CodeGen/RISCV/reserved-regs.ll
--- a/llvm/test/CodeGen/RISCV/reserved-regs.ll
+++ b/llvm/test/CodeGen/RISCV/reserved-regs.ll
@@ -57,8 +57,10 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+reserve-x31 -verify-machineinstrs < %s | FileCheck %s -check-prefix=X31
 ; RUN: llc -mtriple=riscv64 -mattr=+reserve-x31 -verify-machineinstrs < %s | FileCheck %s -check-prefix=X31
 
-; RUN: llc -mtriple=riscv64-fuchsia -verify-machineinstrs < %s | FileCheck %s -check-prefix=X18
-; RUN: llc -mtriple=riscv64-linux-android -verify-machineinstrs < %s | FileCheck %s -check-prefix=X18
+;; Check that targets that reserve a register by default reserve the correct register.
+;; Android and Fuchsia reserve the ShadowCallStack register (x27/s11) by default.
+; RUN: llc -mtriple=riscv64-fuchsia -verify-machineinstrs < %s | FileCheck %s -check-prefix=X27
+; RUN: llc -mtriple=riscv64-linux-android -verify-machineinstrs < %s | FileCheck %s -check-prefix=X27
 
 ; This program is free to use all registers, but needs a stack pointer for
 ; spill values, so do not test for reserving the stack pointer.
diff --git a/llvm/test/CodeGen/RISCV/shadowcallstack.ll b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
--- a/llvm/test/CodeGen/RISCV/shadowcallstack.ll
+++ b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+reserve-x18 -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+reserve-x27 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefix=RV32
-; RUN: llc -mtriple=riscv64 -mattr=+reserve-x18 -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+reserve-x27 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefix=RV64
 
 define void @f1() shadowcallstack {
@@ -34,9 +34,9 @@
 define i32 @f3() shadowcallstack {
 ; RV32-LABEL: f3:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    sw ra, 0(s2)
-; RV32-NEXT:    addi s2, s2, 4
-; RV32-NEXT:    .cfi_escape 0x16, 0x12, 0x02, 0x82, 0x7c #
+; RV32-NEXT:    sw ra, 0(s11)
+; RV32-NEXT:    addi s11, s11, 4
+; RV32-NEXT:    .cfi_escape 0x16, 0x1b, 0x02, 0x8b, 0x7c #
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
@@ -44,16 +44,16 @@
 ; RV32-NEXT:    call bar@plt
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    lw ra, -4(s2)
-; RV32-NEXT:    addi s2, s2, -4
-; RV32-NEXT:    .cfi_restore s2
+; RV32-NEXT:    lw ra, -4(s11)
+; RV32-NEXT:    addi s11, s11, -4
+; RV32-NEXT:    .cfi_restore s11
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: f3:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    sd ra, 0(s2)
-; RV64-NEXT:    addi s2, s2, 8
-; RV64-NEXT:    .cfi_escape 0x16, 0x12, 0x02, 0x82, 0x78 #
+; RV64-NEXT:    sd ra, 0(s11)
+; RV64-NEXT:    addi s11, s11, 8
+; RV64-NEXT:    .cfi_escape 0x16, 0x1b, 0x02, 0x8b, 0x78 #
 ; RV64-NEXT:    addi sp, sp, -16
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
@@ -61,9 +61,9 @@
 ; RV64-NEXT:    call bar@plt
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
-; RV64-NEXT:    ld ra, -8(s2)
-; RV64-NEXT:    addi s2, s2, -8
-; RV64-NEXT:    .cfi_restore s2
+; RV64-NEXT:    ld ra, -8(s11)
+; RV64-NEXT:    addi s11, s11, -8
+; RV64-NEXT:    .cfi_restore s11
 ; RV64-NEXT:    ret
   %res = call i32 @bar()
   %res1 = add i32 %res, 1
@@ -73,72 +73,72 @@
 define i32 @f4() shadowcallstack {
 ; RV32-LABEL: f4:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    sw ra, 0(s2)
-; RV32-NEXT:    addi s2, s2, 4
-; RV32-NEXT:    .cfi_escape 0x16, 0x12, 0x02, 0x82, 0x7c #
+; RV32-NEXT:    sw ra, 0(s11)
+; RV32-NEXT:    addi s11, s11, 4
+; RV32-NEXT:    .cfi_escape 0x16, 0x1b, 0x02, 0x8b, 0x7c #
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s3, 0(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    .cfi_offset s0, -8
 ; RV32-NEXT:    .cfi_offset s1, -12
-; RV32-NEXT:    .cfi_offset s3, -16
+; RV32-NEXT:    .cfi_offset s2, -16
 ; RV32-NEXT:    call bar@plt
 ; RV32-NEXT:    mv s0, a0
 ; RV32-NEXT:    call bar@plt
 ; RV32-NEXT:    mv s1, a0
 ; RV32-NEXT:    call bar@plt
-; RV32-NEXT:    mv s3, a0
+; RV32-NEXT:    mv s2, a0
 ; RV32-NEXT:    call bar@plt
 ; RV32-NEXT:    add s0, s0, s1
-; RV32-NEXT:    add a0, s3, a0
+; RV32-NEXT:    add a0, s2, a0
 ; RV32-NEXT:    add a0, s0, a0
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s3, 0(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    lw ra, -4(s2)
-; RV32-NEXT:    addi s2, s2, -4
-; RV32-NEXT:    .cfi_restore s2
+; RV32-NEXT:    lw ra, -4(s11)
+; RV32-NEXT:    addi s11, s11, -4
+; RV32-NEXT:    .cfi_restore s11
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: f4:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    sd ra, 0(s2)
-; RV64-NEXT:    addi s2, s2, 8
-; RV64-NEXT:    .cfi_escape 0x16, 0x12, 0x02, 0x82, 0x78 #
+; RV64-NEXT:    sd ra, 0(s11)
+; RV64-NEXT:    addi s11, s11, 8
+; RV64-NEXT:    .cfi_escape 0x16, 0x1b, 0x02, 0x8b, 0x78 #
 ; RV64-NEXT:    addi sp, sp, -32
 ; RV64-NEXT:    .cfi_def_cfa_offset 32
 ; RV64-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s3, 0(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
 ; RV64-NEXT:    .cfi_offset s0, -16
 ; RV64-NEXT:    .cfi_offset s1, -24
-; RV64-NEXT:    .cfi_offset s3, -32
+; RV64-NEXT:    .cfi_offset s2, -32
 ; RV64-NEXT:    call bar@plt
 ; RV64-NEXT:    mv s0, a0
 ; RV64-NEXT:    call bar@plt
 ; RV64-NEXT:    mv s1, a0
 ; RV64-NEXT:    call bar@plt
-; RV64-NEXT:    mv s3, a0
+; RV64-NEXT:    mv s2, a0
 ; RV64-NEXT:    call bar@plt
 ; RV64-NEXT:    add s0, s0, s1
-; RV64-NEXT:    add a0, s3, a0
+; RV64-NEXT:    add a0, s2, a0
 ; RV64-NEXT:    addw a0, s0, a0
 ; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s3, 0(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 32
-; RV64-NEXT:    ld ra, -8(s2)
-; RV64-NEXT:    addi s2, s2, -8
-; RV64-NEXT:    .cfi_restore s2
+; RV64-NEXT:    ld ra, -8(s11)
+; RV64-NEXT:    addi s11, s11, -8
+; RV64-NEXT:    .cfi_restore s11
 ; RV64-NEXT:    ret
   %res1 = call i32 @bar()
   %res2 = call i32 @bar()
@@ -153,28 +153,28 @@
 define i32 @f5() shadowcallstack nounwind {
 ; RV32-LABEL: f5:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    sw ra, 0(s2)
-; RV32-NEXT:    addi s2, s2, 4
+; RV32-NEXT:    sw ra, 0(s11)
+; RV32-NEXT:    addi s11, s11, 4
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    call bar@plt
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    lw ra, -4(s2)
-; RV32-NEXT:    addi s2, s2, -4
+; RV32-NEXT:    lw ra, -4(s11)
+; RV32-NEXT:    addi s11, s11, -4
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: f5:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    sd ra, 0(s2)
-; RV64-NEXT:    addi s2, s2, 8
+; RV64-NEXT:    sd ra, 0(s11)
+; RV64-NEXT:    addi s11, s11, 8
 ; RV64-NEXT:    addi sp, sp, -16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    call bar@plt
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
-; RV64-NEXT:    ld ra, -8(s2)
-; RV64-NEXT:    addi s2, s2, -8
+; RV64-NEXT:    ld ra, -8(s11)
+; RV64-NEXT:    addi s11, s11, -8
 ; RV64-NEXT:    ret
   %res = call i32 @bar()
   %res1 = add i32 %res, 1
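
Appendix (illustration only, not part of the applied patch): the .cfi_escape byte
changes in shadowcallstack.ll follow mechanically from the encoding built by
emitSCSPrologue: DW_CFA_val_expression (0x16), the DWARF register number (RISC-V
GPR xN has DWARF number N, so x18 = 0x12 and x27 = 0x1b), an expression length of
2, DW_OP_breg0 plus the register number (0x70 + 18 = 0x82, 0x70 + 27 = 0x8b), and
the negated slot size as a one-byte SLEB128 (-4 = 0x7c, -8 = 0x78). The standalone
C++ sketch below reproduces the old and new byte sequences; the DWARF constants
come from the DWARF standard, and printSCSCFI is a hypothetical helper written for
this note, not an LLVM API.

#include <cstdio>

// DWARF constants (values from the DWARF standard, not LLVM headers).
constexpr unsigned DW_CFA_val_expression = 0x16;
constexpr unsigned DW_OP_breg0 = 0x70;

// Mirror the CFIInst bytes emitted by emitSCSPrologue: when unwinding past this
// frame, the SCS pointer of the previous frame is <current value> + (-SlotSize).
// The offset is a single-byte SLEB128, valid because -4 and -8 fit in 7 bits.
void printSCSCFI(unsigned DwarfReg, int SlotSize) {
  unsigned Offset = static_cast<unsigned char>(-SlotSize) & 0x7f;
  std::printf(".cfi_escape 0x%02x, 0x%02x, 0x02, 0x%02x, 0x%02x\n",
              DW_CFA_val_expression, DwarfReg, DW_OP_breg0 + DwarfReg, Offset);
}

int main() {
  printSCSCFI(18, 4); // old x18/s2, RV32:  0x16, 0x12, 0x02, 0x82, 0x7c
  printSCSCFI(27, 4); // new x27/s11, RV32: 0x16, 0x1b, 0x02, 0x8b, 0x7c
  printSCSCFI(27, 8); // new x27/s11, RV64: 0x16, 0x1b, 0x02, 0x8b, 0x78
}

With this change, riscv64 users enable the sanitizer with
-fsanitize=shadow-call-stack -ffixed-x27 (previously -ffixed-x18), matching the
driver diagnostic in SanitizerArgs.cpp above.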