diff --git a/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll b/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll
@@ -0,0 +1,54 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64I
+
+; This test case tests the LocalStackSlotAllocation pass, which uses a base
+; register for frame indices whose offset is out of range (for RISC-V, the
+; immediate is 12 bits for load/store instructions, excluding vector ones).
+; TODO: Enable the LocalStackSlotAllocation pass.
+define void @use_frame_base_reg() {
+; RV32I-LABEL: use_frame_base_reg:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a0, 24
+; RV32I-NEXT:    addi a0, a0, 1712
+; RV32I-NEXT:    sub sp, sp, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 100016
+; RV32I-NEXT:    lui a0, 24
+; RV32I-NEXT:    addi a0, a0, 1708
+; RV32I-NEXT:    add a0, sp, a0
+; RV32I-NEXT:    lb a0, 0(a0)
+; RV32I-NEXT:    lui a0, 24
+; RV32I-NEXT:    addi a0, a0, 1704
+; RV32I-NEXT:    add a0, sp, a0
+; RV32I-NEXT:    lb a0, 0(a0)
+; RV32I-NEXT:    lui a0, 24
+; RV32I-NEXT:    addi a0, a0, 1712
+; RV32I-NEXT:    add sp, sp, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: use_frame_base_reg:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a0, 24
+; RV64I-NEXT:    addiw a0, a0, 1712
+; RV64I-NEXT:    sub sp, sp, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 100016
+; RV64I-NEXT:    lui a0, 24
+; RV64I-NEXT:    addiw a0, a0, 1708
+; RV64I-NEXT:    add a0, sp, a0
+; RV64I-NEXT:    lb a0, 0(a0)
+; RV64I-NEXT:    lui a0, 24
+; RV64I-NEXT:    addiw a0, a0, 1704
+; RV64I-NEXT:    add a0, sp, a0
+; RV64I-NEXT:    lb a0, 0(a0)
+; RV64I-NEXT:    lui a0, 24
+; RV64I-NEXT:    addiw a0, a0, 1712
+; RV64I-NEXT:    add sp, sp, a0
+; RV64I-NEXT:    ret
+
+  %va = alloca i8, align 4
+  %va1 = alloca i8, align 4
+  %large = alloca [ 100000 x i8 ]
+  %argp.cur = load volatile i8, i8* %va, align 4
+  %argp.next = load volatile i8, i8* %va1, align 4
+  ret void
+}
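
Note (not part of the patch): the CHECK lines above capture the current codegen, where the out-of-range stack offsets (~100000 bytes, well beyond the 12-bit load/store immediate) are rematerialized with a fresh lui/addi/add sequence before every access. Once LocalStackSlotAllocation is enabled, the expectation is that the large part of the offset is materialized into a base register once and reused, leaving only small residual offsets that fit in the 12-bit lb immediate. A rough sketch of the hoped-for RV32I sequence (the register choice a1 and the sp+100008 anchor are illustrative assumptions, not llc output):

    lui   a1, 24
    addi  a1, a1, 1704
    add   a1, sp, a1        # shared base = sp + 100008, computed once
    lb    a0, 4(a1)         # %va  at base + 4 (was sp + 100012)
    lb    a0, 0(a1)         # %va1 at base + 0 (was sp + 100008)

This would replace one lui/addi/add triple per access with a single shared base-register setup, which is the saving the pass is meant to provide for frame accesses whose offsets exceed the 12-bit immediate range.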