diff --git a/llvm/lib/Target/RISCV/RISCV.td b/llvm/lib/Target/RISCV/RISCV.td
--- a/llvm/lib/Target/RISCV/RISCV.td
+++ b/llvm/lib/Target/RISCV/RISCV.td
@@ -484,6 +484,11 @@
 def HasAtomicLdSt
     : Predicate<"Subtarget->hasStdExtA() || Subtarget->hasForcedAtomics()">;
 
+def FeatureTaggedGlobals : SubtargetFeature<"tagged-globals",
+    "AllowTaggedGlobals",
+    "true", "Use an instruction sequence for taking the address of a global "
+    "that allows a memory tag in the upper address bits">;
+
 //===----------------------------------------------------------------------===//
 // Named operands for CSR instructions.
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -454,8 +454,12 @@
                                           MachineBasicBlock::iterator &NextMBBI) {
   MachineFunction *MF = MBB.getParent();
 
-  assert(MF->getTarget().isPositionIndependent());
   const auto &STI = MF->getSubtarget<RISCVSubtarget>();
+  // When HWASAN is used and tagging of global variables is enabled
+  // they should be accessed via the GOT, since the tagged address of a global
+  // is incompatible with existing code models. This also applies to non-pic
+  // mode.
+  assert(MF->getTarget().isPositionIndependent() || STI.allowTaggedGlobals());
   unsigned SecondOpcode = STI.is64Bit() ? RISCV::LD : RISCV::LW;
   return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_GOT_HI,
                              SecondOpcode);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4130,9 +4130,13 @@
   SDLoc DL(N);
   EVT Ty = getPointerTy(DAG.getDataLayout());
 
-  if (isPositionIndependent()) {
+  // When HWASAN is used and tagging of global variables is enabled
+  // they should be accessed via the GOT, since the tagged address of a global
+  // is incompatible with existing code models. This also applies to non-pic
+  // mode.
+  if (isPositionIndependent() || Subtarget.allowTaggedGlobals()) {
     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
-    if (IsLocal)
+    if (IsLocal && !Subtarget.allowTaggedGlobals())
       // Use PC-relative addressing to access the symbol. This generates the
       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
       // %pcrel_lo(auipc)).
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -101,6 +101,7 @@
   bool HasShortForwardBranchOpt = false;
   bool HasLUIADDIFusion = false;
   bool HasForcedAtomics = false;
+  bool AllowTaggedGlobals = false;
   unsigned XLen = 32;
   unsigned ZvlLen = 0;
   MVT XLenVT = MVT::i32;
@@ -199,6 +200,7 @@
   bool enableUnalignedScalarMem() const { return EnableUnalignedScalarMem; }
   bool hasLUIADDIFusion() const { return HasLUIADDIFusion; }
   bool hasForcedAtomics() const { return HasForcedAtomics; }
+  bool allowTaggedGlobals() const { return AllowTaggedGlobals; }
   MVT getXLenVT() const { return XLenVT; }
   unsigned getXLen() const { return XLen; }
   unsigned getFLen() const {
diff --git a/llvm/test/CodeGen/RISCV/tagged-globals.ll b/llvm/test/CodeGen/RISCV/tagged-globals.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/tagged-globals.ll
@@ -0,0 +1,87 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --no-generate-body-for-unused-prefixes
+; RUN: llc --relocation-model=pic < %s | FileCheck %s
+; RUN: llc --relocation-model=static < %s | FileCheck %s
+
+@global_ext = external global i32
+@global_int = internal global i32 0
+declare void @func()
+
+define i32* @global_addr() #0 {
+; CHECK-LABEL: global_addr:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:  .Lpcrel_hi0:
+; CHECK-NEXT:    auipc a0, %got_pcrel_hi(global_ext)
+; CHECK-NEXT:    ld a0, %pcrel_lo(.Lpcrel_hi0)(a0)
+; CHECK-NEXT:    ret
+  ret i32* @global_ext
+}
+
+define i32 @global_load() #0 {
+; CHECK-LABEL: global_load:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:  .Lpcrel_hi1:
+; CHECK-NEXT:    auipc a0, %got_pcrel_hi(global_ext)
+; CHECK-NEXT:    ld a0, %pcrel_lo(.Lpcrel_hi1)(a0)
+; CHECK-NEXT:    lw a0, 0(a0)
+; CHECK-NEXT:    ret
+  %load = load i32, i32* @global_ext
+  ret i32 %load
+}
+
+define void @global_store() #0 {
+; CHECK-LABEL: global_store:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:  .Lpcrel_hi2:
+; CHECK-NEXT:    auipc a0, %got_pcrel_hi(global_ext)
+; CHECK-NEXT:    ld a0, %pcrel_lo(.Lpcrel_hi2)(a0)
+; CHECK-NEXT:    sw zero, 0(a0)
+; CHECK-NEXT:    ret
+  store i32 0, i32* @global_ext
+  ret void
+}
+
+define i32* @global_int_addr() #0 {
+; CHECK-LABEL: global_int_addr:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:  .Lpcrel_hi3:
+; CHECK-NEXT:    auipc a0, %got_pcrel_hi(global_int)
+; CHECK-NEXT:    ld a0, %pcrel_lo(.Lpcrel_hi3)(a0)
+; CHECK-NEXT:    ret
+  ret i32* @global_int
+}
+
+define i32 @global_int_load() #0 {
+; CHECK-LABEL: global_int_load:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:  .Lpcrel_hi4:
+; CHECK-NEXT:    auipc a0, %got_pcrel_hi(global_int)
+; CHECK-NEXT:    ld a0, %pcrel_lo(.Lpcrel_hi4)(a0)
+; CHECK-NEXT:    lw a0, 0(a0)
+; CHECK-NEXT:    ret
+  %load = load i32, i32* @global_int
+  ret i32 %load
+}
+
+define void @global_int_store() #0 {
+; CHECK-LABEL: global_int_store:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:  .Lpcrel_hi5:
+; CHECK-NEXT:    auipc a0, %got_pcrel_hi(global_int)
+; CHECK-NEXT:    ld a0, %pcrel_lo(.Lpcrel_hi5)(a0)
+; CHECK-NEXT:    sw zero, 0(a0)
+; CHECK-NEXT:    ret
+  store i32 0, i32* @global_int
+  ret void
+}
+
+define void ()* @func_addr() #0 {
+; CHECK-LABEL: func_addr:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:  .Lpcrel_hi6:
+; CHECK-NEXT:    auipc a0, %got_pcrel_hi(func)
+; CHECK-NEXT:    ld a0, %pcrel_lo(.Lpcrel_hi6)(a0)
+; CHECK-NEXT:    ret
+  ret void ()* @func
+}
+
+attributes #0 = { "target-features"="+tagged-globals" }
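
For context, and not part of the patch itself: without +tagged-globals, only preemptible symbols under -relocation-model=pic are loaded from the GOT; local symbols and all symbols in static code have their addresses materialized directly in the instruction stream, which leaves no room for a tag in the upper bits. A rough sketch of those direct sequences on riscv64 with the medlow code model (illustrative only, not autogenerated llc output):

    # static relocation model: absolute address materialized with lui/addi
    global_addr:
            lui     a0, %hi(global_ext)
            addi    a0, a0, %lo(global_ext)
            ret

    # pic, dso-local symbol: PC-relative address via PseudoLLA (auipc/addi)
    global_int_addr:
    .Lpcrel_hi0:
            auipc   a0, %pcrel_hi(global_int)
            addi    a0, a0, %pcrel_lo(.Lpcrel_hi0)
            ret

Both sequences encode the plain link-time address in the code itself, which is why the patch routes every global access through a GOT load (auipc %got_pcrel_hi + ld) whenever AllowTaggedGlobals is set, in pic and non-pic mode alike, so the full, possibly tagged, address can be loaded from the GOT entry instead.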