diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp @@ -582,7 +582,12 @@ // Offsets for objects with fixed locations (IE: those saved by libcall) are // simply calculated from the frame index. if (FrameIdx < 0) - Offset = FrameIdx * (int64_t) STI.getXLen() / 8; + if (RVFI->isPushable(MF)) { + // Callee-saved registers stored by Zcmp push are in reverse order. + Offset = -(FrameIdx + RVFI->getRVPushRegs() + 1) * + (int64_t)STI.getXLen() / 8; + } else + Offset = FrameIdx * (int64_t)STI.getXLen() / 8; else Offset = MFI.getObjectOffset(FrameIdx) - RVFI->getLibCallStackSize(); diff --git a/llvm/test/CodeGen/RISCV/push-pop-popret.ll b/llvm/test/CodeGen/RISCV/push-pop-popret.ll --- a/llvm/test/CodeGen/RISCV/push-pop-popret.ll +++ b/llvm/test/CodeGen/RISCV/push-pop-popret.ll @@ -99,8 +99,8 @@ ; RV32IZCMP: # %bb.0: # %entry ; RV32IZCMP-NEXT: cm.push {ra, s0}, -16 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16 -; RV32IZCMP-NEXT: .cfi_offset ra, -4 -; RV32IZCMP-NEXT: .cfi_offset s0, -8 +; RV32IZCMP-NEXT: .cfi_offset ra, -8 +; RV32IZCMP-NEXT: .cfi_offset s0, -4 ; RV32IZCMP-NEXT: addi s0, sp, 16 ; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0 ; RV32IZCMP-NEXT: addi a0, a0, 15 @@ -115,8 +115,8 @@ ; RV64IZCMP: # %bb.0: # %entry ; RV64IZCMP-NEXT: cm.push {ra, s0}, -16 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16 -; RV64IZCMP-NEXT: .cfi_offset ra, -8 -; RV64IZCMP-NEXT: .cfi_offset s0, -16 +; RV64IZCMP-NEXT: .cfi_offset ra, -16 +; RV64IZCMP-NEXT: .cfi_offset s0, -8 ; RV64IZCMP-NEXT: addi s0, sp, 16 ; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0 ; RV64IZCMP-NEXT: slli a0, a0, 32 @@ -221,8 +221,8 @@ ; RV32IZCMP: # %bb.0: # %entry ; RV32IZCMP-NEXT: cm.push {ra, s0}, -16 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16 -; RV32IZCMP-NEXT: .cfi_offset ra, -4 -; RV32IZCMP-NEXT: .cfi_offset s0, -8 +; RV32IZCMP-NEXT: .cfi_offset ra, -8 +; RV32IZCMP-NEXT: .cfi_offset s0, -4 ; 
RV32IZCMP-NEXT: addi s0, sp, 16 ; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0 ; RV32IZCMP-NEXT: addi a0, a0, 15 @@ -238,8 +238,8 @@ ; RV64IZCMP: # %bb.0: # %entry ; RV64IZCMP-NEXT: cm.push {ra, s0}, -16 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16 -; RV64IZCMP-NEXT: .cfi_offset ra, -8 -; RV64IZCMP-NEXT: .cfi_offset s0, -16 +; RV64IZCMP-NEXT: .cfi_offset ra, -16 +; RV64IZCMP-NEXT: .cfi_offset s0, -8 ; RV64IZCMP-NEXT: addi s0, sp, 16 ; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0 ; RV64IZCMP-NEXT: slli a0, a0, 32 @@ -345,8 +345,8 @@ ; RV32IZCMP: # %bb.0: # %entry ; RV32IZCMP-NEXT: cm.push {ra, s0}, -16 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16 -; RV32IZCMP-NEXT: .cfi_offset ra, -4 -; RV32IZCMP-NEXT: .cfi_offset s0, -8 +; RV32IZCMP-NEXT: .cfi_offset ra, -8 +; RV32IZCMP-NEXT: .cfi_offset s0, -4 ; RV32IZCMP-NEXT: addi s0, sp, 16 ; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0 ; RV32IZCMP-NEXT: addi a0, a0, 15 @@ -362,8 +362,8 @@ ; RV64IZCMP: # %bb.0: # %entry ; RV64IZCMP-NEXT: cm.push {ra, s0}, -16 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16 -; RV64IZCMP-NEXT: .cfi_offset ra, -8 -; RV64IZCMP-NEXT: .cfi_offset s0, -16 +; RV64IZCMP-NEXT: .cfi_offset ra, -16 +; RV64IZCMP-NEXT: .cfi_offset s0, -8 ; RV64IZCMP-NEXT: addi s0, sp, 16 ; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0 ; RV64IZCMP-NEXT: slli a0, a0, 32 @@ -469,8 +469,8 @@ ; RV32IZCMP: # %bb.0: # %entry ; RV32IZCMP-NEXT: cm.push {ra, s0}, -16 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16 -; RV32IZCMP-NEXT: .cfi_offset ra, -4 -; RV32IZCMP-NEXT: .cfi_offset s0, -8 +; RV32IZCMP-NEXT: .cfi_offset ra, -8 +; RV32IZCMP-NEXT: .cfi_offset s0, -4 ; RV32IZCMP-NEXT: addi s0, sp, 16 ; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0 ; RV32IZCMP-NEXT: addi a0, a0, 15 @@ -486,8 +486,8 @@ ; RV64IZCMP: # %bb.0: # %entry ; RV64IZCMP-NEXT: cm.push {ra, s0}, -16 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16 -; RV64IZCMP-NEXT: .cfi_offset ra, -8 -; RV64IZCMP-NEXT: .cfi_offset s0, -16 +; RV64IZCMP-NEXT: .cfi_offset ra, -16 +; RV64IZCMP-NEXT: .cfi_offset s0, -8 ; RV64IZCMP-NEXT: addi s0, sp, 16 ; 
RV64IZCMP-NEXT: .cfi_def_cfa s0, 0 ; RV64IZCMP-NEXT: slli a0, a0, 32 @@ -593,8 +593,8 @@ ; RV32IZCMP: # %bb.0: # %entry ; RV32IZCMP-NEXT: cm.push {ra, s0}, -16 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16 -; RV32IZCMP-NEXT: .cfi_offset ra, -4 -; RV32IZCMP-NEXT: .cfi_offset s0, -8 +; RV32IZCMP-NEXT: .cfi_offset ra, -8 +; RV32IZCMP-NEXT: .cfi_offset s0, -4 ; RV32IZCMP-NEXT: addi s0, sp, 16 ; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0 ; RV32IZCMP-NEXT: addi a0, a0, 15 @@ -609,8 +609,8 @@ ; RV64IZCMP: # %bb.0: # %entry ; RV64IZCMP-NEXT: cm.push {ra, s0}, -16 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16 -; RV64IZCMP-NEXT: .cfi_offset ra, -8 -; RV64IZCMP-NEXT: .cfi_offset s0, -16 +; RV64IZCMP-NEXT: .cfi_offset ra, -16 +; RV64IZCMP-NEXT: .cfi_offset s0, -8 ; RV64IZCMP-NEXT: addi s0, sp, 16 ; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0 ; RV64IZCMP-NEXT: slli a0, a0, 32 @@ -627,8 +627,8 @@ ; RV32IZCMP-SR: # %bb.0: # %entry ; RV32IZCMP-SR-NEXT: cm.push {ra, s0}, -16 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16 -; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4 -; RV32IZCMP-SR-NEXT: .cfi_offset s0, -8 +; RV32IZCMP-SR-NEXT: .cfi_offset ra, -8 +; RV32IZCMP-SR-NEXT: .cfi_offset s0, -4 ; RV32IZCMP-SR-NEXT: addi s0, sp, 16 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0 ; RV32IZCMP-SR-NEXT: addi a0, a0, 15 @@ -643,8 +643,8 @@ ; RV64IZCMP-SR: # %bb.0: # %entry ; RV64IZCMP-SR-NEXT: cm.push {ra, s0}, -16 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16 -; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8 -; RV64IZCMP-SR-NEXT: .cfi_offset s0, -16 +; RV64IZCMP-SR-NEXT: .cfi_offset ra, -16 +; RV64IZCMP-SR-NEXT: .cfi_offset s0, -8 ; RV64IZCMP-SR-NEXT: addi s0, sp, 16 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0 ; RV64IZCMP-SR-NEXT: slli a0, a0, 32 @@ -710,16 +710,16 @@ ; RV32IZCMP: # %bb.0: # %entry ; RV32IZCMP-NEXT: cm.push {ra, s0-s8}, -48 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 48 -; RV32IZCMP-NEXT: .cfi_offset ra, -4 -; RV32IZCMP-NEXT: .cfi_offset s0, -8 -; RV32IZCMP-NEXT: .cfi_offset s1, -12 -; RV32IZCMP-NEXT: .cfi_offset s2, -16 -; RV32IZCMP-NEXT: 
.cfi_offset s3, -20 -; RV32IZCMP-NEXT: .cfi_offset s4, -24 -; RV32IZCMP-NEXT: .cfi_offset s5, -28 -; RV32IZCMP-NEXT: .cfi_offset s6, -32 -; RV32IZCMP-NEXT: .cfi_offset s7, -36 -; RV32IZCMP-NEXT: .cfi_offset s8, -40 +; RV32IZCMP-NEXT: .cfi_offset ra, -40 +; RV32IZCMP-NEXT: .cfi_offset s0, -36 +; RV32IZCMP-NEXT: .cfi_offset s1, -32 +; RV32IZCMP-NEXT: .cfi_offset s2, -28 +; RV32IZCMP-NEXT: .cfi_offset s3, -24 +; RV32IZCMP-NEXT: .cfi_offset s4, -20 +; RV32IZCMP-NEXT: .cfi_offset s5, -16 +; RV32IZCMP-NEXT: .cfi_offset s6, -12 +; RV32IZCMP-NEXT: .cfi_offset s7, -8 +; RV32IZCMP-NEXT: .cfi_offset s8, -4 ; RV32IZCMP-NEXT: addi s0, sp, 48 ; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0 ; RV32IZCMP-NEXT: addi a0, a0, 15 @@ -749,16 +749,16 @@ ; RV64IZCMP: # %bb.0: # %entry ; RV64IZCMP-NEXT: cm.push {ra, s0-s8}, -80 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 80 -; RV64IZCMP-NEXT: .cfi_offset ra, -8 -; RV64IZCMP-NEXT: .cfi_offset s0, -16 -; RV64IZCMP-NEXT: .cfi_offset s1, -24 -; RV64IZCMP-NEXT: .cfi_offset s2, -32 -; RV64IZCMP-NEXT: .cfi_offset s3, -40 -; RV64IZCMP-NEXT: .cfi_offset s4, -48 -; RV64IZCMP-NEXT: .cfi_offset s5, -56 -; RV64IZCMP-NEXT: .cfi_offset s6, -64 -; RV64IZCMP-NEXT: .cfi_offset s7, -72 -; RV64IZCMP-NEXT: .cfi_offset s8, -80 +; RV64IZCMP-NEXT: .cfi_offset ra, -80 +; RV64IZCMP-NEXT: .cfi_offset s0, -72 +; RV64IZCMP-NEXT: .cfi_offset s1, -64 +; RV64IZCMP-NEXT: .cfi_offset s2, -56 +; RV64IZCMP-NEXT: .cfi_offset s3, -48 +; RV64IZCMP-NEXT: .cfi_offset s4, -40 +; RV64IZCMP-NEXT: .cfi_offset s5, -32 +; RV64IZCMP-NEXT: .cfi_offset s6, -24 +; RV64IZCMP-NEXT: .cfi_offset s7, -16 +; RV64IZCMP-NEXT: .cfi_offset s8, -8 ; RV64IZCMP-NEXT: addi s0, sp, 80 ; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0 ; RV64IZCMP-NEXT: slli a0, a0, 32 @@ -790,16 +790,16 @@ ; RV32IZCMP-SR: # %bb.0: # %entry ; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s8}, -48 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 48 -; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4 -; RV32IZCMP-SR-NEXT: .cfi_offset s0, -8 -; RV32IZCMP-SR-NEXT: .cfi_offset s1, -12 
-; RV32IZCMP-SR-NEXT: .cfi_offset s2, -16 -; RV32IZCMP-SR-NEXT: .cfi_offset s3, -20 -; RV32IZCMP-SR-NEXT: .cfi_offset s4, -24 -; RV32IZCMP-SR-NEXT: .cfi_offset s5, -28 -; RV32IZCMP-SR-NEXT: .cfi_offset s6, -32 -; RV32IZCMP-SR-NEXT: .cfi_offset s7, -36 -; RV32IZCMP-SR-NEXT: .cfi_offset s8, -40 +; RV32IZCMP-SR-NEXT: .cfi_offset ra, -40 +; RV32IZCMP-SR-NEXT: .cfi_offset s0, -36 +; RV32IZCMP-SR-NEXT: .cfi_offset s1, -32 +; RV32IZCMP-SR-NEXT: .cfi_offset s2, -28 +; RV32IZCMP-SR-NEXT: .cfi_offset s3, -24 +; RV32IZCMP-SR-NEXT: .cfi_offset s4, -20 +; RV32IZCMP-SR-NEXT: .cfi_offset s5, -16 +; RV32IZCMP-SR-NEXT: .cfi_offset s6, -12 +; RV32IZCMP-SR-NEXT: .cfi_offset s7, -8 +; RV32IZCMP-SR-NEXT: .cfi_offset s8, -4 ; RV32IZCMP-SR-NEXT: addi s0, sp, 48 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0 ; RV32IZCMP-SR-NEXT: addi a0, a0, 15 @@ -829,16 +829,16 @@ ; RV64IZCMP-SR: # %bb.0: # %entry ; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s8}, -80 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 80 -; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8 -; RV64IZCMP-SR-NEXT: .cfi_offset s0, -16 -; RV64IZCMP-SR-NEXT: .cfi_offset s1, -24 -; RV64IZCMP-SR-NEXT: .cfi_offset s2, -32 -; RV64IZCMP-SR-NEXT: .cfi_offset s3, -40 -; RV64IZCMP-SR-NEXT: .cfi_offset s4, -48 -; RV64IZCMP-SR-NEXT: .cfi_offset s5, -56 -; RV64IZCMP-SR-NEXT: .cfi_offset s6, -64 -; RV64IZCMP-SR-NEXT: .cfi_offset s7, -72 -; RV64IZCMP-SR-NEXT: .cfi_offset s8, -80 +; RV64IZCMP-SR-NEXT: .cfi_offset ra, -80 +; RV64IZCMP-SR-NEXT: .cfi_offset s0, -72 +; RV64IZCMP-SR-NEXT: .cfi_offset s1, -64 +; RV64IZCMP-SR-NEXT: .cfi_offset s2, -56 +; RV64IZCMP-SR-NEXT: .cfi_offset s3, -48 +; RV64IZCMP-SR-NEXT: .cfi_offset s4, -40 +; RV64IZCMP-SR-NEXT: .cfi_offset s5, -32 +; RV64IZCMP-SR-NEXT: .cfi_offset s6, -24 +; RV64IZCMP-SR-NEXT: .cfi_offset s7, -16 +; RV64IZCMP-SR-NEXT: .cfi_offset s8, -8 ; RV64IZCMP-SR-NEXT: addi s0, sp, 80 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0 ; RV64IZCMP-SR-NEXT: slli a0, a0, 32