diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.h b/llvm/lib/Target/RISCV/RISCVTargetMachine.h
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.h
@@ -43,6 +43,9 @@
   }
 
   TargetTransformInfo getTargetTransformInfo(const Function &F) override;
+
+  bool isNoopAddrSpaceCast(unsigned SrcAS,
+                           unsigned DstAS) const override;
 };
 }
 
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -113,6 +113,15 @@
   return TargetTransformInfo(RISCVTTIImpl(this, F));
 }
 
+// A RISC-V hart has a single byte-addressable address space of 2^XLEN bytes
+// for all memory accesses, so it is reasonable to assume that an
+// implementation has no-op address space casts. If an implementation makes a
+// change to this, they can override it here.
+bool RISCVTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
+                                             unsigned DstAS) const {
+  return true;
+}
+
 namespace {
 class RISCVPassConfig : public TargetPassConfig {
 public:
diff --git a/llvm/test/CodeGen/RISCV/addrspacecast.ll b/llvm/test/CodeGen/RISCV/addrspacecast.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/addrspacecast.ll
@@ -0,0 +1,49 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefix=RV64I
+
+define void @cast0(i32 addrspace(1)* %ptr) {
+; RV32I-LABEL: cast0:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sw zero, 0(a0)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: cast0:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sw zero, 0(a0)
+; RV64I-NEXT:    ret
+  %ptr0 = addrspacecast i32 addrspace(1)* %ptr to i32 addrspace(0)*
+  store i32 0, i32* %ptr0
+  ret void
+}
+
+define void @cast1(i32* %ptr) {
+; RV32I-LABEL: cast1:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    .cfi_def_cfa_offset 16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    call foo@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: cast1:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    .cfi_def_cfa_offset 16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    call foo@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %castptr = addrspacecast i32* %ptr to i32 addrspace(10)*
+  call void @foo(i32 addrspace(10)* %castptr)
+  ret void
+}
+
+declare void @foo(i32 addrspace(10)*)