diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.h b/llvm/lib/Target/RISCV/RISCVFrameLowering.h
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.h
@@ -87,6 +87,11 @@
   void adjustStackForRVV(MachineFunction &MF, MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
                          int64_t Amount, MachineInstr::MIFlag Flag) const;
+  void emitCalleeSavedRVVPrologCFI(MachineBasicBlock &MBB,
+                                   MachineBasicBlock::iterator MI,
+                                   bool HasFP) const;
+  void emitCalleeSavedRVVEpilogCFI(MachineBasicBlock &MBB,
+                                   MachineBasicBlock::iterator MI) const;
   std::pair<int64_t, Align> assignRVVStackObjectOffsets(MachineFunction &MF) const;
 };
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -426,6 +426,32 @@
                 Flag, getStackAlign());
 }
 
+static void appendScalableVectorExpression(SmallVectorImpl<char> &Expr,
+                                           int FixedOffset, int ScalableOffset,
+                                           unsigned DwarfVlenb,
+                                           llvm::raw_string_ostream &Comment) {
+  uint8_t buffer[16];
+  if (FixedOffset) {
+    Expr.push_back(dwarf::DW_OP_consts);
+    Expr.append(buffer, buffer + encodeSLEB128(FixedOffset, buffer));
+    Expr.push_back((uint8_t)dwarf::DW_OP_plus);
+    Comment << (FixedOffset < 0 ? " - " : " + ") << std::abs(FixedOffset);
+  }
+
+  Expr.push_back((uint8_t)dwarf::DW_OP_consts);
+  Expr.append(buffer, buffer + encodeSLEB128(ScalableOffset, buffer));
+
+  Expr.push_back((uint8_t)dwarf::DW_OP_bregx);
+  Expr.append(buffer, buffer + encodeULEB128(DwarfVlenb, buffer));
+  Expr.push_back(0);
+
+  Expr.push_back((uint8_t)dwarf::DW_OP_mul);
+  Expr.push_back((uint8_t)dwarf::DW_OP_plus);
+
+  Comment << (ScalableOffset < 0 ? " - " : " + ") << std::abs(ScalableOffset)
+          << " * vlenb";
+}
+
 static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI,
                                                Register Reg,
                                                uint64_t FixedOffset,
@@ -443,29 +469,40 @@
   else
     Comment << printReg(Reg, &TRI);
 
-  uint8_t buffer[16];
-  if (FixedOffset) {
-    Expr.push_back(dwarf::DW_OP_consts);
-    Expr.append(buffer, buffer + encodeSLEB128(FixedOffset, buffer));
-    Expr.push_back((uint8_t)dwarf::DW_OP_plus);
-    Comment << " + " << FixedOffset;
-  }
+  appendScalableVectorExpression(
+      Expr, FixedOffset, ScalableOffset,
+      TRI.getDwarfRegNum(RISCV::VLENB, true), Comment);
 
-  Expr.push_back((uint8_t)dwarf::DW_OP_consts);
-  Expr.append(buffer, buffer + encodeSLEB128(ScalableOffset, buffer));
+  SmallString<64> DefCfaExpr;
+  uint8_t buffer[16];
+  DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
+  DefCfaExpr.append(buffer, buffer + encodeULEB128(Expr.size(), buffer));
+  DefCfaExpr.append(Expr.str());
 
-  unsigned DwarfVlenb = TRI.getDwarfRegNum(RISCV::VLENB, true);
-  Expr.push_back((uint8_t)dwarf::DW_OP_bregx);
-  Expr.append(buffer, buffer + encodeULEB128(DwarfVlenb, buffer));
-  Expr.push_back(0);
+  return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(), SMLoc(),
+                                        Comment.str());
+}
 
-  Expr.push_back((uint8_t)dwarf::DW_OP_mul);
-  Expr.push_back((uint8_t)dwarf::DW_OP_plus);
+static MCCFIInstruction createDefCFAOffset(const TargetRegisterInfo &TRI,
+                                           Register Reg,
+                                           uint64_t FixedOffset,
+                                           uint64_t ScalableOffset) {
+  assert(ScalableOffset != 0 && "Did not need to adjust CFA for RVV");
+  SmallString<64> Expr;
+  std::string CommentBuffer;
+  llvm::raw_string_ostream Comment(CommentBuffer);
+  Comment << printReg(Reg, &TRI) << " @ cfa";
 
-  Comment << " + " << ScalableOffset << " * vlenb";
+  // Build up the expression (FixedOffset + ScalableOffset * VLENB).
+  appendScalableVectorExpression(
+      Expr, FixedOffset, ScalableOffset,
+      TRI.getDwarfRegNum(RISCV::VLENB, true), Comment);
 
   SmallString<64> DefCfaExpr;
-  DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
+  uint8_t buffer[16];
+  unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);
+  DefCfaExpr.push_back(dwarf::DW_CFA_expression);
+  DefCfaExpr.append(buffer, buffer + encodeULEB128(DwarfReg, buffer));
   DefCfaExpr.append(buffer, buffer + encodeULEB128(Expr.size(), buffer));
   DefCfaExpr.append(Expr.str());
@@ -666,6 +703,9 @@
           .addCFIIndex(CFIIndex)
           .setMIFlag(MachineInstr::FrameSetup);
     }
+
+    std::advance(MBBI, getRVVCalleeSavedInfo(MF, CSI).size());
+    emitCalleeSavedRVVPrologCFI(MBB, MBBI, hasFP(MF));
   }
 
   if (hasFP(MF)) {
@@ -753,6 +793,9 @@
   uint64_t FPOffset = RealStackSize - RVFI->getVarArgsSaveSize();
   uint64_t RVVStackSize = RVFI->getRVVStackSize();
 
+  if (RVVStackSize)
+    emitCalleeSavedRVVEpilogCFI(MBB, LastFrameDestroy);
+
   // Restore the stack pointer using the value of the frame pointer. Only
   // necessary if the stack pointer was modified, meaning the stack size is
   // unknown.
@@ -775,6 +818,15 @@
                   MachineInstr::FrameDestroy);
   }
 
+  if (RVVStackSize) {
+    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
+        nullptr, RI->getDwarfRegNum(SPReg, true), RealStackSize));
+    BuildMI(MBB, LastFrameDestroy, DL,
+            STI.getInstrInfo()->get(TargetOpcode::CFI_INSTRUCTION))
+        .addCFIIndex(CFIIndex)
+        .setMIFlags(MachineInstr::FrameDestroy);
+  }
+
   uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
   if (FirstSPAdjustAmount) {
     uint64_t SecondSPAdjustAmount =
@@ -1401,6 +1453,64 @@
   return true;
 }
 
+void RISCVFrameLowering::emitCalleeSavedRVVPrologCFI(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, bool HasFP) const {
+  MachineFunction *MF = MBB.getParent();
+  const MachineFrameInfo &MFI = MF->getFrameInfo();
+  RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
+  const TargetInstrInfo &TII = *STI.getInstrInfo();
+  DebugLoc DL = MBB.findDebugLoc(MI);
+
+  const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, MFI.getCalleeSavedInfo());
+  if (RVVCSI.empty())
+    return;
+
+  uint64_t FixedSize = getStackSizeWithRVVPadding(*MF) +
+                       RVFI->getLibCallStackSize() +
+                       RVFI->getRVPushStackSize();
+  if (!HasFP) {
+    uint64_t ScalarLocalVarSize =
+        MFI.getStackSize() - RVFI->getCalleeSavedStackSize() -
+        RVFI->getRVPushStackSize() - RVFI->getVarArgsSaveSize() +
+        RVFI->getRVVPadding();
+    FixedSize -= ScalarLocalVarSize;
+  }
+
+  for (auto &CS : RVVCSI) {
+    // Record in the CFI where each scalable vector callee-saved register
+    // was spilled.
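+    // The slot's address is CFA - FixedSize + (ObjectOffset / 8) * vlenb,
+    // which a plain .cfi_offset cannot encode, so createDefCFAOffset emits a
+    // DW_CFA_expression escape combining the fixed and vlenb-scaled parts.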
+ int FI = CS.getFrameIdx(); + if (FI >= 0 && MFI.getStackID(FI) == TargetStackID::ScalableVector) { + unsigned CFIIndex = MF->addFrameInst(createDefCFAOffset( + *STI.getRegisterInfo(), CS.getReg(), + -FixedSize, MFI.getObjectOffset(FI) / 8)); + BuildMI(MBB, MI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex) + .setMIFlag(MachineInstr::FrameSetup); + } + } +} + +void RISCVFrameLowering::emitCalleeSavedRVVEpilogCFI( + MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const { + MachineFunction *MF = MBB.getParent(); + const MachineFrameInfo &MFI = MF->getFrameInfo(); + const TargetRegisterInfo &TRI = *STI.getRegisterInfo(); + const TargetInstrInfo &TII = *STI.getInstrInfo(); + DebugLoc DL = MBB.findDebugLoc(MI); + + const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, MFI.getCalleeSavedInfo()); + if (RVVCSI.empty()) + return; + + for (const auto &CS : RVVCSI) { + unsigned CFIIndex = MF->addFrameInst(MCCFIInstruction::createRestore( + nullptr, TRI.getDwarfRegNum(CS.getReg(), true))); + BuildMI(MBB, MI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex) + .setMIFlags(MachineInstr::FrameDestroy); + } +} + bool RISCVFrameLowering::restoreCalleeSavedRegisters( MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, MutableArrayRef CSI, const TargetRegisterInfo *TRI) const { diff --git a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll --- a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll +++ b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll @@ -85,6 +85,7 @@ ; CHECK-NEXT: li a1, 10 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll --- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll +++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll @@ -84,6 +84,7 @@ ; CHECK-NEXT: li a1, 24 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 32 ; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NEXT: addi sp, sp, 32 @@ -145,6 +146,7 @@ ; SUBREGLIVENESS-NEXT: csrr a0, vlenb ; SUBREGLIVENESS-NEXT: slli a0, a0, 4 ; SUBREGLIVENESS-NEXT: add sp, sp, a0 +; SUBREGLIVENESS-NEXT: .cfi_def_cfa sp, 32 ; SUBREGLIVENESS-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; SUBREGLIVENESS-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; SUBREGLIVENESS-NEXT: addi sp, sp, 32 diff --git a/llvm/test/CodeGen/RISCV/rvv-cfi-info.ll b/llvm/test/CodeGen/RISCV/rvv-cfi-info.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv-cfi-info.ll @@ -0,0 +1,225 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=OMIT-FP %s +; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs -frame-pointer=all < %s \ +; RUN: | FileCheck -check-prefix=NO-OMIT-FP %s + +define riscv_vector_cc @test_vector_callee_cfi( %va) { +; OMIT-FP-LABEL: test_vector_callee_cfi: +; OMIT-FP: # %bb.0: # %entry +; OMIT-FP-NEXT: addi sp, sp, -16 +; OMIT-FP-NEXT: .cfi_def_cfa_offset 16 +; OMIT-FP-NEXT: csrr a0, vlenb +; OMIT-FP-NEXT: slli a0, a0, 3 +; OMIT-FP-NEXT: sub sp, sp, a0 +; OMIT-FP-NEXT: 
.cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; OMIT-FP-NEXT: csrr a0, vlenb +; OMIT-FP-NEXT: li a1, 6 +; OMIT-FP-NEXT: mul a0, a0, a1 +; OMIT-FP-NEXT: add a0, sp, a0 +; OMIT-FP-NEXT: addi a0, a0, 16 +; OMIT-FP-NEXT: vs1r.v v1, (a0) # Unknown-size Folded Spill +; OMIT-FP-NEXT: csrr a0, vlenb +; OMIT-FP-NEXT: slli a1, a0, 2 +; OMIT-FP-NEXT: add a0, a1, a0 +; OMIT-FP-NEXT: add a0, sp, a0 +; OMIT-FP-NEXT: addi a0, a0, 16 +; OMIT-FP-NEXT: vs1r.v v2, (a0) # Unknown-size Folded Spill +; OMIT-FP-NEXT: csrr a0, vlenb +; OMIT-FP-NEXT: slli a0, a0, 2 +; OMIT-FP-NEXT: add a0, sp, a0 +; OMIT-FP-NEXT: addi a0, a0, 16 +; OMIT-FP-NEXT: vs1r.v v3, (a0) # Unknown-size Folded Spill +; OMIT-FP-NEXT: csrr a0, vlenb +; OMIT-FP-NEXT: slli a1, a0, 1 +; OMIT-FP-NEXT: add a0, a1, a0 +; OMIT-FP-NEXT: add a0, sp, a0 +; OMIT-FP-NEXT: addi a0, a0, 16 +; OMIT-FP-NEXT: vs1r.v v4, (a0) # Unknown-size Folded Spill +; OMIT-FP-NEXT: csrr a0, vlenb +; OMIT-FP-NEXT: slli a0, a0, 1 +; OMIT-FP-NEXT: add a0, sp, a0 +; OMIT-FP-NEXT: addi a0, a0, 16 +; OMIT-FP-NEXT: vs1r.v v5, (a0) # Unknown-size Folded Spill +; OMIT-FP-NEXT: csrr a0, vlenb +; OMIT-FP-NEXT: add a0, sp, a0 +; OMIT-FP-NEXT: addi a0, a0, 16 +; OMIT-FP-NEXT: vs1r.v v6, (a0) # Unknown-size Folded Spill +; OMIT-FP-NEXT: addi a0, sp, 16 +; OMIT-FP-NEXT: vs1r.v v7, (a0) # Unknown-size Folded Spill +; OMIT-FP-NEXT: .cfi_escape 0x10, 0x61, 0x08, 0x11, 0x7e, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v1 @ cfa - 2 * vlenb +; OMIT-FP-NEXT: .cfi_escape 0x10, 0x62, 0x08, 0x11, 0x7d, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v2 @ cfa - 3 * vlenb +; OMIT-FP-NEXT: .cfi_escape 0x10, 0x63, 0x08, 0x11, 0x7c, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v3 @ cfa - 4 * vlenb +; OMIT-FP-NEXT: .cfi_escape 0x10, 0x64, 0x08, 0x11, 0x7b, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v4 @ cfa - 5 * vlenb +; OMIT-FP-NEXT: .cfi_escape 0x10, 0x65, 0x08, 0x11, 0x7a, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v5 @ cfa - 6 * vlenb +; OMIT-FP-NEXT: .cfi_escape 0x10, 0x66, 0x08, 0x11, 0x79, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v6 @ cfa - 7 * vlenb +; OMIT-FP-NEXT: .cfi_escape 0x10, 0x67, 0x08, 0x11, 0x78, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v7 @ cfa - 8 * vlenb +; OMIT-FP-NEXT: #APP +; OMIT-FP-NEXT: #NO_APP +; OMIT-FP-NEXT: csrr a0, vlenb +; OMIT-FP-NEXT: li a1, 6 +; OMIT-FP-NEXT: mul a0, a0, a1 +; OMIT-FP-NEXT: add a0, sp, a0 +; OMIT-FP-NEXT: addi a0, a0, 16 +; OMIT-FP-NEXT: vl1r.v v1, (a0) # Unknown-size Folded Reload +; OMIT-FP-NEXT: csrr a0, vlenb +; OMIT-FP-NEXT: slli a1, a0, 2 +; OMIT-FP-NEXT: add a0, a1, a0 +; OMIT-FP-NEXT: add a0, sp, a0 +; OMIT-FP-NEXT: addi a0, a0, 16 +; OMIT-FP-NEXT: vl1r.v v2, (a0) # Unknown-size Folded Reload +; OMIT-FP-NEXT: csrr a0, vlenb +; OMIT-FP-NEXT: slli a0, a0, 2 +; OMIT-FP-NEXT: add a0, sp, a0 +; OMIT-FP-NEXT: addi a0, a0, 16 +; OMIT-FP-NEXT: vl1r.v v3, (a0) # Unknown-size Folded Reload +; OMIT-FP-NEXT: csrr a0, vlenb +; OMIT-FP-NEXT: slli a1, a0, 1 +; OMIT-FP-NEXT: add a0, a1, a0 +; OMIT-FP-NEXT: add a0, sp, a0 +; OMIT-FP-NEXT: addi a0, a0, 16 +; OMIT-FP-NEXT: vl1r.v v4, (a0) # Unknown-size Folded Reload +; OMIT-FP-NEXT: csrr a0, vlenb +; OMIT-FP-NEXT: slli a0, a0, 1 +; OMIT-FP-NEXT: add a0, sp, a0 +; OMIT-FP-NEXT: addi a0, a0, 16 +; OMIT-FP-NEXT: vl1r.v v5, (a0) # Unknown-size Folded Reload +; OMIT-FP-NEXT: csrr a0, vlenb +; OMIT-FP-NEXT: add a0, sp, a0 +; OMIT-FP-NEXT: addi a0, a0, 16 +; OMIT-FP-NEXT: vl1r.v v6, (a0) # Unknown-size Folded Reload +; OMIT-FP-NEXT: addi a0, sp, 16 +; OMIT-FP-NEXT: vl1r.v v7, 
(a0) # Unknown-size Folded Reload +; OMIT-FP-NEXT: .cfi_restore v1 +; OMIT-FP-NEXT: .cfi_restore v2 +; OMIT-FP-NEXT: .cfi_restore v3 +; OMIT-FP-NEXT: .cfi_restore v4 +; OMIT-FP-NEXT: .cfi_restore v5 +; OMIT-FP-NEXT: .cfi_restore v6 +; OMIT-FP-NEXT: .cfi_restore v7 +; OMIT-FP-NEXT: csrr a0, vlenb +; OMIT-FP-NEXT: slli a0, a0, 3 +; OMIT-FP-NEXT: add sp, sp, a0 +; OMIT-FP-NEXT: .cfi_def_cfa sp, 16 +; OMIT-FP-NEXT: addi sp, sp, 16 +; OMIT-FP-NEXT: ret +; +; NO-OMIT-FP-LABEL: test_vector_callee_cfi: +; NO-OMIT-FP: # %bb.0: # %entry +; NO-OMIT-FP-NEXT: addi sp, sp, -32 +; NO-OMIT-FP-NEXT: .cfi_def_cfa_offset 32 +; NO-OMIT-FP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; NO-OMIT-FP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; NO-OMIT-FP-NEXT: .cfi_offset ra, -8 +; NO-OMIT-FP-NEXT: .cfi_offset s0, -16 +; NO-OMIT-FP-NEXT: addi s0, sp, 32 +; NO-OMIT-FP-NEXT: .cfi_def_cfa s0, 0 +; NO-OMIT-FP-NEXT: csrr a0, vlenb +; NO-OMIT-FP-NEXT: slli a0, a0, 3 +; NO-OMIT-FP-NEXT: sub sp, sp, a0 +; NO-OMIT-FP-NEXT: csrr a0, vlenb +; NO-OMIT-FP-NEXT: slli a0, a0, 1 +; NO-OMIT-FP-NEXT: sub a0, s0, a0 +; NO-OMIT-FP-NEXT: addi a0, a0, -32 +; NO-OMIT-FP-NEXT: vs1r.v v1, (a0) # Unknown-size Folded Spill +; NO-OMIT-FP-NEXT: csrr a0, vlenb +; NO-OMIT-FP-NEXT: slli a1, a0, 1 +; NO-OMIT-FP-NEXT: add a0, a1, a0 +; NO-OMIT-FP-NEXT: sub a0, s0, a0 +; NO-OMIT-FP-NEXT: addi a0, a0, -32 +; NO-OMIT-FP-NEXT: vs1r.v v2, (a0) # Unknown-size Folded Spill +; NO-OMIT-FP-NEXT: csrr a0, vlenb +; NO-OMIT-FP-NEXT: slli a0, a0, 2 +; NO-OMIT-FP-NEXT: sub a0, s0, a0 +; NO-OMIT-FP-NEXT: addi a0, a0, -32 +; NO-OMIT-FP-NEXT: vs1r.v v3, (a0) # Unknown-size Folded Spill +; NO-OMIT-FP-NEXT: csrr a0, vlenb +; NO-OMIT-FP-NEXT: slli a1, a0, 2 +; NO-OMIT-FP-NEXT: add a0, a1, a0 +; NO-OMIT-FP-NEXT: sub a0, s0, a0 +; NO-OMIT-FP-NEXT: addi a0, a0, -32 +; NO-OMIT-FP-NEXT: vs1r.v v4, (a0) # Unknown-size Folded Spill +; NO-OMIT-FP-NEXT: csrr a0, vlenb +; NO-OMIT-FP-NEXT: li a1, 6 +; NO-OMIT-FP-NEXT: mul a0, a0, a1 +; NO-OMIT-FP-NEXT: sub a0, s0, a0 +; NO-OMIT-FP-NEXT: addi a0, a0, -32 +; NO-OMIT-FP-NEXT: vs1r.v v5, (a0) # Unknown-size Folded Spill +; NO-OMIT-FP-NEXT: csrr a0, vlenb +; NO-OMIT-FP-NEXT: slli a1, a0, 3 +; NO-OMIT-FP-NEXT: sub a0, a1, a0 +; NO-OMIT-FP-NEXT: sub a0, s0, a0 +; NO-OMIT-FP-NEXT: addi a0, a0, -32 +; NO-OMIT-FP-NEXT: vs1r.v v6, (a0) # Unknown-size Folded Spill +; NO-OMIT-FP-NEXT: csrr a0, vlenb +; NO-OMIT-FP-NEXT: slli a0, a0, 3 +; NO-OMIT-FP-NEXT: sub a0, s0, a0 +; NO-OMIT-FP-NEXT: addi a0, a0, -32 +; NO-OMIT-FP-NEXT: vs1r.v v7, (a0) # Unknown-size Folded Spill +; NO-OMIT-FP-NEXT: .cfi_escape 0x10, 0x61, 0x0b, 0x11, 0x60, 0x22, 0x11, 0x7e, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v1 @ cfa - 32 - 2 * vlenb +; NO-OMIT-FP-NEXT: .cfi_escape 0x10, 0x62, 0x0b, 0x11, 0x60, 0x22, 0x11, 0x7d, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v2 @ cfa - 32 - 3 * vlenb +; NO-OMIT-FP-NEXT: .cfi_escape 0x10, 0x63, 0x0b, 0x11, 0x60, 0x22, 0x11, 0x7c, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v3 @ cfa - 32 - 4 * vlenb +; NO-OMIT-FP-NEXT: .cfi_escape 0x10, 0x64, 0x0b, 0x11, 0x60, 0x22, 0x11, 0x7b, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v4 @ cfa - 32 - 5 * vlenb +; NO-OMIT-FP-NEXT: .cfi_escape 0x10, 0x65, 0x0b, 0x11, 0x60, 0x22, 0x11, 0x7a, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v5 @ cfa - 32 - 6 * vlenb +; NO-OMIT-FP-NEXT: .cfi_escape 0x10, 0x66, 0x0b, 0x11, 0x60, 0x22, 0x11, 0x79, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v6 @ cfa - 32 - 7 * vlenb +; NO-OMIT-FP-NEXT: .cfi_escape 0x10, 0x67, 0x0b, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # $v7 @ cfa - 
32 - 8 * vlenb +; NO-OMIT-FP-NEXT: #APP +; NO-OMIT-FP-NEXT: #NO_APP +; NO-OMIT-FP-NEXT: csrr a0, vlenb +; NO-OMIT-FP-NEXT: slli a0, a0, 1 +; NO-OMIT-FP-NEXT: sub a0, s0, a0 +; NO-OMIT-FP-NEXT: addi a0, a0, -32 +; NO-OMIT-FP-NEXT: vl1r.v v1, (a0) # Unknown-size Folded Reload +; NO-OMIT-FP-NEXT: csrr a0, vlenb +; NO-OMIT-FP-NEXT: slli a1, a0, 1 +; NO-OMIT-FP-NEXT: add a0, a1, a0 +; NO-OMIT-FP-NEXT: sub a0, s0, a0 +; NO-OMIT-FP-NEXT: addi a0, a0, -32 +; NO-OMIT-FP-NEXT: vl1r.v v2, (a0) # Unknown-size Folded Reload +; NO-OMIT-FP-NEXT: csrr a0, vlenb +; NO-OMIT-FP-NEXT: slli a0, a0, 2 +; NO-OMIT-FP-NEXT: sub a0, s0, a0 +; NO-OMIT-FP-NEXT: addi a0, a0, -32 +; NO-OMIT-FP-NEXT: vl1r.v v3, (a0) # Unknown-size Folded Reload +; NO-OMIT-FP-NEXT: csrr a0, vlenb +; NO-OMIT-FP-NEXT: slli a1, a0, 2 +; NO-OMIT-FP-NEXT: add a0, a1, a0 +; NO-OMIT-FP-NEXT: sub a0, s0, a0 +; NO-OMIT-FP-NEXT: addi a0, a0, -32 +; NO-OMIT-FP-NEXT: vl1r.v v4, (a0) # Unknown-size Folded Reload +; NO-OMIT-FP-NEXT: csrr a0, vlenb +; NO-OMIT-FP-NEXT: li a1, 6 +; NO-OMIT-FP-NEXT: mul a0, a0, a1 +; NO-OMIT-FP-NEXT: sub a0, s0, a0 +; NO-OMIT-FP-NEXT: addi a0, a0, -32 +; NO-OMIT-FP-NEXT: vl1r.v v5, (a0) # Unknown-size Folded Reload +; NO-OMIT-FP-NEXT: csrr a0, vlenb +; NO-OMIT-FP-NEXT: slli a1, a0, 3 +; NO-OMIT-FP-NEXT: sub a0, a1, a0 +; NO-OMIT-FP-NEXT: sub a0, s0, a0 +; NO-OMIT-FP-NEXT: addi a0, a0, -32 +; NO-OMIT-FP-NEXT: vl1r.v v6, (a0) # Unknown-size Folded Reload +; NO-OMIT-FP-NEXT: csrr a0, vlenb +; NO-OMIT-FP-NEXT: slli a0, a0, 3 +; NO-OMIT-FP-NEXT: sub a0, s0, a0 +; NO-OMIT-FP-NEXT: addi a0, a0, -32 +; NO-OMIT-FP-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload +; NO-OMIT-FP-NEXT: .cfi_restore v1 +; NO-OMIT-FP-NEXT: .cfi_restore v2 +; NO-OMIT-FP-NEXT: .cfi_restore v3 +; NO-OMIT-FP-NEXT: .cfi_restore v4 +; NO-OMIT-FP-NEXT: .cfi_restore v5 +; NO-OMIT-FP-NEXT: .cfi_restore v6 +; NO-OMIT-FP-NEXT: .cfi_restore v7 +; NO-OMIT-FP-NEXT: addi sp, s0, -32 +; NO-OMIT-FP-NEXT: .cfi_def_cfa sp, 32 +; NO-OMIT-FP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; NO-OMIT-FP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; NO-OMIT-FP-NEXT: addi sp, sp, 32 +; NO-OMIT-FP-NEXT: ret +entry: + call void asm sideeffect "", + "~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7}"() + + ret %va +} diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll @@ -650,6 +650,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call @llvm.vp.abs.nxv16i64( %va, i1 false, %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll --- a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll @@ -46,6 +46,7 @@ ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 1 ; RV64IV-NEXT: add sp, sp, a0 +; RV64IV-NEXT: .cfi_def_cfa sp, 528 ; RV64IV-NEXT: addi sp, sp, 528 ; RV64IV-NEXT: ret %local = alloca i64 diff --git a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir --- a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir +++ b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir @@ -50,6 +50,7 @@ ; CHECK-NEXT: VS1R_V killed renamable $v8, killed renamable $x10 ; CHECK-NEXT: $x2 = frame-destroy ADDI $x8, -2048 ; CHECK-NEXT: 
$x2 = frame-destroy ADDI killed $x2, -224 + ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $x2, 2272 ; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 240 ; CHECK-NEXT: $x1 = LD $x2, 2024 :: (load (s64) from %stack.3) ; CHECK-NEXT: $x8 = LD $x2, 2016 :: (load (s64) from %stack.4) diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll --- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll +++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll @@ -29,6 +29,7 @@ ; CHECK-NEXT: csrrs a0, vlenb, zero ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: jalr zero, 0(ra) entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll @@ -1580,6 +1580,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 48 ; RV32-NEXT: addi sp, sp, 48 ; RV32-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll @@ -3223,6 +3223,7 @@ ; RV32-NEXT: li a1, 24 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 48 ; RV32-NEXT: addi sp, sp, 48 ; RV32-NEXT: ret ; @@ -3300,6 +3301,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret ; @@ -3404,6 +3406,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 48 ; RV32-NEXT: addi sp, sp, 48 ; RV32-NEXT: ret ; @@ -3605,6 +3608,7 @@ ; RV32-NEXT: li a1, 24 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 48 ; RV32-NEXT: addi sp, sp, 48 ; RV32-NEXT: ret ; @@ -3682,6 +3686,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret ; @@ -3786,6 +3791,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 48 ; RV32-NEXT: addi sp, sp, 48 ; RV32-NEXT: ret ; @@ -3948,6 +3954,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -4035,6 +4042,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll @@ -633,6 +633,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll @@ -1292,6 +1292,7 @@ ; RV32-NEXT: li a1, 24 ; RV32-NEXT: mul a0, a0, a1 ; 
RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -1342,6 +1343,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret ; @@ -1406,6 +1408,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -1541,6 +1544,7 @@ ; RV32-NEXT: li a1, 24 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -1591,6 +1595,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret ; @@ -1655,6 +1660,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -1754,6 +1760,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll @@ -184,6 +184,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 5 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = load , * %x @@ -272,6 +273,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %r = add %x, %y @@ -315,6 +317,7 @@ ; RV32-NEXT: vmv8r.v v16, v24 ; RV32-NEXT: call ext2@plt ; RV32-NEXT: addi sp, s0, -144 +; RV32-NEXT: .cfi_def_cfa sp, 144 ; RV32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 136(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 144 @@ -349,6 +352,7 @@ ; RV64-NEXT: vmv8r.v v16, v24 ; RV64-NEXT: call ext2@plt ; RV64-NEXT: addi sp, s0, -144 +; RV64-NEXT: .cfi_def_cfa sp, 144 ; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 144 @@ -421,6 +425,7 @@ ; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; RV32-NEXT: call ext3@plt ; RV32-NEXT: addi sp, s0, -144 +; RV32-NEXT: .cfi_def_cfa sp, 144 ; RV32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 136(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 144 @@ -489,6 +494,7 @@ ; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; RV64-NEXT: call ext3@plt ; RV64-NEXT: addi sp, s0, -144 +; RV64-NEXT: .cfi_def_cfa sp, 144 ; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 144 @@ -564,6 +570,7 @@ ; RV32-NEXT: vmv.v.i v16, 0 ; RV32-NEXT: call vector_arg_indirect_stack@plt ; RV32-NEXT: addi sp, s0, -144 +; RV32-NEXT: .cfi_def_cfa sp, 144 ; RV32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 136(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 144 @@ -615,6 +622,7 @@ ; RV64-NEXT: vmv.v.i v16, 0 ; RV64-NEXT: call vector_arg_indirect_stack@plt ; RV64-NEXT: addi sp, s0, -144 +; RV64-NEXT: .cfi_def_cfa sp, 144 ; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload ; 
RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 144 diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll @@ -47,6 +47,7 @@ ; RV32-NEXT: vmv.v.i v16, 0 ; RV32-NEXT: call callee_scalable_vector_split_indirect@plt ; RV32-NEXT: addi sp, s0, -144 +; RV32-NEXT: .cfi_def_cfa sp, 144 ; RV32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 136(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 144 @@ -78,6 +79,7 @@ ; RV64-NEXT: vmv.v.i v16, 0 ; RV64-NEXT: call callee_scalable_vector_split_indirect@plt ; RV64-NEXT: addi sp, s0, -144 +; RV64-NEXT: .cfi_def_cfa sp, 144 ; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 144 diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll @@ -790,6 +790,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call @llvm.vp.ceil.nxv16f64( %va, %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll @@ -2747,6 +2747,7 @@ } define @vp_ctlz_zero_undef_nxv16i64_unmasked( %va, i32 zeroext %evl) { +; ; CHECK-LABEL: vp_ctlz_zero_undef_nxv16i64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a1, vlenb diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll @@ -2964,6 +2964,7 @@ ; RV32-NEXT: li a1, 56 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 48 ; RV32-NEXT: addi sp, sp, 48 ; RV32-NEXT: ret ; @@ -3049,6 +3050,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret ; @@ -3206,6 +3208,7 @@ ; RV32-NEXT: li a1, 40 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 48 ; RV32-NEXT: addi sp, sp, 48 ; RV32-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll @@ -3243,6 +3243,7 @@ ; RV32-NEXT: li a1, 40 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 48 ; RV32-NEXT: addi sp, sp, 48 ; RV32-NEXT: ret ; @@ -3335,6 +3336,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret ; @@ -3486,6 +3488,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 48 ; RV32-NEXT: addi sp, sp, 48 ; RV32-NEXT: ret ; @@ -4920,6 +4923,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir --- a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir +++ 
b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir @@ -152,6 +152,7 @@ ; CHECK-NEXT: bb.2: ; CHECK-NEXT: $x2 = frame-destroy ADDI $x8, -2048 ; CHECK-NEXT: $x2 = frame-destroy ADDI killed $x2, -256 + ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $x2, 2304 ; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 272 ; CHECK-NEXT: $x1 = LD $x2, 2024 :: (load (s64) from %stack.3) ; CHECK-NEXT: $x8 = LD $x2, 2016 :: (load (s64) from %stack.4) diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll @@ -615,6 +615,7 @@ ; RV32-NEXT: add a0, a1, a0 ; RV32-NEXT: fld fa0, -8(a0) ; RV32-NEXT: addi sp, s0, -80 +; RV32-NEXT: .cfi_def_cfa sp, 80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 @@ -652,6 +653,7 @@ ; RV64-NEXT: add a0, a0, a2 ; RV64-NEXT: fld fa0, 0(a0) ; RV64-NEXT: addi sp, s0, -80 +; RV64-NEXT: .cfi_def_cfa sp, 80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 @@ -702,6 +704,7 @@ ; RV32-NEXT: vs8r.v v16, (a1) ; RV32-NEXT: fld fa0, 0(a0) ; RV32-NEXT: addi sp, s0, -80 +; RV32-NEXT: .cfi_def_cfa sp, 80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 @@ -737,6 +740,7 @@ ; RV64-NEXT: vs8r.v v16, (a1) ; RV64-NEXT: fld fa0, 0(a0) ; RV64-NEXT: addi sp, s0, -80 +; RV64-NEXT: .cfi_def_cfa sp, 80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll @@ -895,6 +895,7 @@ ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: lw a0, -4(a0) ; CHECK-NEXT: addi sp, s0, -80 +; CHECK-NEXT: .cfi_def_cfa sp, 80 ; CHECK-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; CHECK-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; CHECK-NEXT: addi sp, sp, 80 @@ -945,6 +946,7 @@ ; CHECK-NEXT: vs8r.v v16, (a1) ; CHECK-NEXT: lw a0, 0(a0) ; CHECK-NEXT: addi sp, s0, -80 +; CHECK-NEXT: .cfi_def_cfa sp, 80 ; CHECK-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; CHECK-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; CHECK-NEXT: addi sp, sp, 80 diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll @@ -834,6 +834,7 @@ ; CHECK-NEXT: add a0, a0, a2 ; CHECK-NEXT: ld a0, 0(a0) ; CHECK-NEXT: addi sp, s0, -80 +; CHECK-NEXT: .cfi_def_cfa sp, 80 ; CHECK-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; CHECK-NEXT: addi sp, sp, 80 @@ -884,6 +885,7 @@ ; CHECK-NEXT: vs8r.v v16, (a1) ; CHECK-NEXT: ld a0, 0(a0) ; CHECK-NEXT: addi sp, s0, -80 +; CHECK-NEXT: .cfi_def_cfa sp, 80 ; CHECK-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; CHECK-NEXT: addi sp, sp, 80 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll @@ 
-2338,6 +2338,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -2415,6 +2416,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret %v = call <15 x i64> @llvm.vp.bitreverse.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl) @@ -2506,6 +2508,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -2718,6 +2721,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -2795,6 +2799,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret %v = call <16 x i64> @llvm.vp.bitreverse.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl) @@ -2886,6 +2891,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -3039,6 +3045,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -3124,6 +3131,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret %v = call <128 x i16> @llvm.vp.bitreverse.v128i16(<128 x i16> %va, <128 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll @@ -1008,6 +1008,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -1058,6 +1059,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret %v = call <15 x i64> @llvm.vp.bswap.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl) @@ -1121,6 +1123,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -1278,6 +1281,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -1328,6 +1332,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret %v = call <16 x i64> @llvm.vp.bswap.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl) @@ -1391,6 +1396,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -1481,6 +1487,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call <128 x i16> @llvm.vp.bswap.v128i16(<128 x i16> %va, <128 
x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll @@ -659,6 +659,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call <32 x double> @llvm.vp.ceil.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll @@ -2800,6 +2800,7 @@ ; RV32-NEXT: li a1, 56 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -2911,6 +2912,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret %v = call <32 x i64> @llvm.vp.ctlz.v32i64(<32 x i64> %va, i1 false, <32 x i1> %m, i32 %evl) @@ -3065,6 +3067,7 @@ ; RV32-NEXT: li a1, 40 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -5917,6 +5920,7 @@ ; RV32-NEXT: li a1, 56 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -6028,6 +6032,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret %v = call <32 x i64> @llvm.vp.ctlz.v32i64(<32 x i64> %va, i1 true, <32 x i1> %m, i32 %evl) @@ -6182,6 +6187,7 @@ ; RV32-NEXT: li a1, 40 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll @@ -2065,6 +2065,7 @@ ; RV32-NEXT: li a1, 56 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -2149,6 +2150,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret %v = call <32 x i64> @llvm.vp.ctpop.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl) @@ -2260,6 +2262,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll @@ -2388,6 +2388,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 6 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -2479,6 +2480,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: 
ret %v = call <32 x i64> @llvm.vp.cttz.v32i64(<32 x i64> %va, i1 false, <32 x i1> %m, i32 %evl) @@ -2613,6 +2615,7 @@ ; RV32-NEXT: li a1, 40 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -5033,6 +5036,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 6 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -5124,6 +5128,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret %v = call <32 x i64> @llvm.vp.cttz.v32i64(<32 x i64> %va, i1 true, <32 x i1> %m, i32 %evl) @@ -5258,6 +5263,7 @@ ; RV32-NEXT: li a1, 40 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll @@ -659,6 +659,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call <32 x double> @llvm.vp.floor.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll @@ -298,6 +298,7 @@ ; RV32-V128-NEXT: csrr a0, vlenb ; RV32-V128-NEXT: slli a0, a0, 4 ; RV32-V128-NEXT: add sp, sp, a0 +; RV32-V128-NEXT: .cfi_def_cfa sp, 16 ; RV32-V128-NEXT: addi sp, sp, 16 ; RV32-V128-NEXT: ret ; @@ -349,6 +350,7 @@ ; RV64-V128-NEXT: csrr a0, vlenb ; RV64-V128-NEXT: slli a0, a0, 4 ; RV64-V128-NEXT: add sp, sp, a0 +; RV64-V128-NEXT: .cfi_def_cfa sp, 16 ; RV64-V128-NEXT: addi sp, sp, 16 ; RV64-V128-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fshr-fshl-vp.ll @@ -824,6 +824,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -853,6 +854,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret %res = call <16 x i64> @llvm.vp.fshr.v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 %evl) @@ -891,6 +893,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -920,6 +923,7 @@ ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret %res = call <16 x i64> @llvm.vp.fshl.v16i64(<16 x i64> %a, <16 x i64> %b, <16 x i64> %c, <16 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll @@ -580,6 +580,7 @@ ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: vs8r.v v16, (a1) ; RV32-NEXT: addi sp, s0, -80 +; RV32-NEXT: .cfi_def_cfa sp, 80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 @@ -613,6 +614,7 @@ ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: vs8r.v v16, (a1) ; RV64-NEXT: addi sp, s0, -80 +; RV64-NEXT: .cfi_def_cfa sp, 80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll @@ -457,6 +457,7 @@ ; RV32-V128-NEXT: csrr a0, vlenb ; RV32-V128-NEXT: slli a0, a0, 4 ; RV32-V128-NEXT: add sp, sp, a0 +; RV32-V128-NEXT: .cfi_def_cfa sp, 16 ; RV32-V128-NEXT: addi sp, sp, 16 ; RV32-V128-NEXT: ret ; @@ -508,6 +509,7 @@ ; RV64-V128-NEXT: csrr a0, vlenb ; RV64-V128-NEXT: slli a0, a0, 4 ; RV64-V128-NEXT: add sp, sp, a0 +; RV64-V128-NEXT: .cfi_def_cfa sp, 16 ; RV64-V128-NEXT: addi sp, sp, 16 ; RV64-V128-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll @@ -648,6 +648,7 @@ ; RV32-NEXT: li a1, 78 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -1245,6 +1246,7 @@ ; RV64-NEXT: li a1, 92 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret %interleaved.vec = load <48 x i64>, ptr %ptr diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll @@ -1596,6 +1596,7 @@ ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: slli a2, a2, 4 ; RV32-NEXT: add sp, sp, a2 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -1641,6 +1642,7 @@ ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: add sp, sp, a1 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret %v = load <64 x i32>, ptr %x @@ -1695,6 +1697,7 @@ ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: slli a2, a2, 4 ; RV32-NEXT: add sp, sp, a2 +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; @@ -1740,6 +1743,7 @@ ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: add sp, sp, a1 +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret %v = load <64 x i32>, ptr %x diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll @@ -592,6 +592,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; 
CHECK-NEXT: ret %v = call <32 x double> @llvm.vp.rint.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll @@ -659,6 +659,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call <32 x double> @llvm.vp.round.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll @@ -659,6 +659,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call <32 x double> @llvm.vp.roundeven.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll @@ -659,6 +659,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll @@ -590,6 +590,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call <128 x i1> @llvm.vp.fcmp.v128f16(<128 x half> %va, <128 x half> %vb, metadata !"oeq", <128 x i1> %m, i32 %evl) @@ -1201,6 +1202,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call <32 x i1> @llvm.vp.fcmp.v32f64(<32 x double> %va, <32 x double> %vb, metadata !"oeq", <32 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll @@ -673,6 +673,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call <256 x i1> @llvm.vp.icmp.v256i8(<256 x i8> %va, <256 x i8> %vb, metadata !"eq", <256 x i1> %m, i32 %evl) @@ -1364,6 +1365,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call <64 x i1> @llvm.vp.icmp.v64i32(<64 x i32> %va, <64 x i32> %vb, metadata !"eq", <64 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll @@ -480,6 +480,7 @@ ; CHECK-NEXT: li a1, 56 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call <128 x i32> @llvm.vp.trunc.v128i32.v128i64(<128 x i64> %a, <128 x i1> %m, i32 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll @@ -365,6 +365,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call <32 x double> @llvm.vp.copysign.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll @@ -745,6 +745,7 @@ ; CHECK-NEXT: li a1, 40 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call <32 x double> @llvm.vp.fma.v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> %m, i32 %evl) @@ -812,6 +813,7 @@ ; CHECK-NEXT: li a1, 24 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %head = insertelement <32 x i1> poison, i1 true, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll @@ -365,6 +365,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call <32 x double> @llvm.vp.maxnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll @@ -365,6 +365,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call <32 x double> @llvm.vp.minnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll @@ -745,6 +745,7 @@ ; CHECK-NEXT: li a1, 40 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret %v = call <32 x double> @llvm.vp.fmuladd.v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> %m, i32 %evl) @@ -812,6 +813,7 @@ ; CHECK-NEXT: li a1, 24 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <32 x i1> poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
@@ -112,6 +112,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %a = load <64 x half>, ptr %x
@@ -211,6 +212,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %a = load <32 x float>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
@@ -112,6 +112,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %a = load <64 x half>, ptr %x
@@ -211,6 +212,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %a = load <32 x float>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
@@ -112,6 +112,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %a = load <64 x half>, ptr %x
@@ -211,6 +212,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %a = load <32 x float>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -2362,6 +2362,7 @@
 ; RV64-NEXT: csrr a0, vlenb
 ; RV64-NEXT: slli a0, a0, 3
 ; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
 ; RV64-NEXT: addi sp, sp, 16
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds double, ptr %base, <32 x i32> %idxs
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
@@ -1103,6 +1103,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call <32 x double> @llvm.vp.merge.v32f64(<32 x i1> %m, <32 x double> %va, <32 x double> %vb, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
@@ -1748,6 +1748,7 @@
 ; RV64-NEXT: csrr a0, vlenb
 ; RV64-NEXT: slli a0, a0, 3
 ; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
 ; RV64-NEXT: addi sp, sp, 16
 ; RV64-NEXT: ret
 call void @llvm.vp.scatter.v32f64.v32p0(<32 x double> %val, <32 x ptr> %ptrs, <32 x i1> %m, i32 %evl)
@@ -1832,6 +1833,7 @@
 ; RV64-NEXT: li a1, 10
 ; RV64-NEXT: mul a0, a0, a1
 ; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
 ; RV64-NEXT: addi sp, sp, 16
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds double, ptr %base, <32 x i32> %idxs
@@ -1918,6 +1920,7 @@
 ; RV64-NEXT: li a1, 10
 ; RV64-NEXT: mul a0, a0, a1
 ; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
 ; RV64-NEXT: addi sp, sp, 16
 ; RV64-NEXT: ret
 %eidxs = sext <32 x i32> %idxs to <32 x i64>
@@ -2005,6 +2008,7 @@
 ; RV64-NEXT: li a1, 10
 ; RV64-NEXT: mul a0, a0, a1
 ; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
 ; RV64-NEXT: addi sp, sp, 16
 ; RV64-NEXT: ret
 %eidxs = zext <32 x i32> %idxs to <32 x i64>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll
@@ -95,6 +95,7 @@
 ; CHECK-NEXT: li a1, 40
 ; CHECK-NEXT: mul a0, a0, a1
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %c = add <512 x i8> %a, %b
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
@@ -187,6 +187,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call <256 x i8> @llvm.vp.select.v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i32 %evl)
@@ -245,6 +246,7 @@
 ; CHECK-NEXT: li a1, 24
 ; CHECK-NEXT: mul a0, a0, a1
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call <256 x i8> @llvm.vp.select.v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i32 129)
@@ -444,6 +446,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call <32 x i64> @llvm.vp.select.v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32 %evl)
@@ -488,6 +491,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call <32 x i64> @llvm.vp.select.v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32 17)
@@ -628,6 +632,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call <64 x float> @llvm.vp.select.v64f32(<64 x i1> %a, <64 x float> %b, <64 x float> %c, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
@@ -296,6 +296,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %a = load <128 x i8>, ptr %x
@@ -333,6 +334,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %a = load <64 x i16>, ptr %x
@@ -368,6 +370,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %a = load <32 x i32>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
@@ -288,6 +288,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %a = load <128 x i8>, ptr %x
@@ -325,6 +326,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %a = load <64 x i16>, ptr %x
@@ -360,6 +362,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %a = load <32 x i32>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
@@ -272,6 +272,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %a = load <128 x i8>, ptr %x
@@ -309,6 +310,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %a = load <64 x i16>, ptr %x
@@ -344,6 +346,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %a = load <32 x i32>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
@@ -790,6 +790,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.floor.nxv16f64( %va, %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
@@ -505,6 +505,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 48
 ; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -662,6 +663,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 48
 ; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -830,6 +832,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 48
 ; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -1493,6 +1496,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 80
 ; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 56(sp) # 8-byte Folded Reload
@@ -1774,6 +1778,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 80
 ; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 56(sp) # 8-byte Folded Reload
@@ -2078,6 +2083,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 80
 ; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 56(sp) # 8-byte Folded Reload
@@ -2266,6 +2272,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -2354,6 +2361,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -2493,6 +2501,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -2674,6 +2683,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -2762,6 +2772,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -2901,6 +2912,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -3801,6 +3813,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 48
 ; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -3956,6 +3969,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 48
 ; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -4123,6 +4137,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 48
 ; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -4774,6 +4789,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 80
 ; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 56(sp) # 8-byte Folded Reload
@@ -5051,6 +5067,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 80
 ; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 56(sp) # 8-byte Folded Reload
@@ -5354,6 +5371,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 80
 ; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 56(sp) # 8-byte Folded Reload
@@ -5546,6 +5564,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -5632,6 +5651,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -5747,6 +5767,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -5932,6 +5953,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -6018,6 +6040,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
@@ -6133,6 +6156,7 @@
 ; CHECK-V-NEXT: csrr a0, vlenb
 ; CHECK-V-NEXT: slli a0, a0, 1
 ; CHECK-V-NEXT: add sp, sp, a0
+; CHECK-V-NEXT: .cfi_def_cfa sp, 64
 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
 ; CHECK-V-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
@@ -231,6 +231,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %res = call @llvm.vp.fshr.nxv64i8( %a, %b, %c, %m, i32 %evl)
@@ -262,6 +263,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %res = call @llvm.vp.fshl.nxv64i8( %a, %b, %c, %m, i32 %evl)
@@ -463,6 +465,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %res = call @llvm.vp.fshr.nxv32i16( %a, %b, %c, %m, i32 %evl)
@@ -494,6 +497,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %res = call @llvm.vp.fshl.nxv32i16( %a, %b, %c, %m, i32 %evl)
@@ -671,6 +675,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %res = call @llvm.vp.fshr.nxv16i32( %a, %b, %c, %m, i32 %evl)
@@ -705,6 +710,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %res = call @llvm.vp.fshl.nxv16i32( %a, %b, %c, %m, i32 %evl)
@@ -846,6 +852,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %res = call @llvm.vp.fshr.nxv7i64( %a, %b, %c, %m, i32 %evl)
@@ -880,6 +887,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %res = call @llvm.vp.fshl.nxv7i64( %a, %b, %c, %m, i32 %evl)
@@ -913,6 +921,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %res = call @llvm.vp.fshr.nxv8i64( %a, %b, %c, %m, i32 %evl)
@@ -947,6 +956,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %res = call @llvm.vp.fshl.nxv8i64( %a, %b, %c, %m, i32 %evl)
@@ -1137,6 +1147,7 @@
 ; CHECK-NEXT: li a1, 56
 ; CHECK-NEXT: mul a0, a0, a1
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %res = call @llvm.vp.fshr.nxv16i64( %a, %b, %c, %m, i32 %evl)
@@ -1325,6 +1336,7 @@
 ; CHECK-NEXT: li a1, 56
 ; CHECK-NEXT: mul a0, a0, a1
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %res = call @llvm.vp.fshl.nxv16i64( %a, %b, %c, %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir b/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir
--- a/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir
@@ -36,6 +36,7 @@
 ; CHECK-NEXT: $x10 = frame-destroy PseudoReadVLENB
 ; CHECK-NEXT: $x10 = frame-destroy SLLI killed $x10, 1
 ; CHECK-NEXT: $x2 = frame-destroy ADD $x2, killed $x10
+ ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $x2, 0
 ; CHECK-NEXT: PseudoRET
 bb.0:
 bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
--- a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
@@ -35,6 +35,7 @@
 ; CHECK-NEXT: call spillslot@plt
 ; CHECK-NEXT: addi sp, s0, -2048
 ; CHECK-NEXT: addi sp, sp, -256
+ ; CHECK-NEXT: .cfi_def_cfa sp, 2304
 ; CHECK-NEXT: addi sp, sp, 272
 ; CHECK-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
 ; CHECK-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/localvar.ll b/llvm/test/CodeGen/RISCV/rvv/localvar.ll
--- a/llvm/test/CodeGen/RISCV/rvv/localvar.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/localvar.ll
@@ -21,6 +21,7 @@
 ; RV64IV-NEXT: csrr a0, vlenb
 ; RV64IV-NEXT: slli a0, a0, 1
 ; RV64IV-NEXT: add sp, sp, a0
+; RV64IV-NEXT: .cfi_def_cfa sp, 16
 ; RV64IV-NEXT: addi sp, sp, 16
 ; RV64IV-NEXT: ret
 %local0 = alloca
@@ -48,6 +49,7 @@
 ; RV64IV-NEXT: csrr a0, vlenb
 ; RV64IV-NEXT: slli a0, a0, 1
 ; RV64IV-NEXT: add sp, sp, a0
+; RV64IV-NEXT: .cfi_def_cfa sp, 16
 ; RV64IV-NEXT: addi sp, sp, 16
 ; RV64IV-NEXT: ret
 %local0 = alloca
@@ -76,6 +78,7 @@
 ; RV64IV-NEXT: csrr a0, vlenb
 ; RV64IV-NEXT: slli a0, a0, 2
 ; RV64IV-NEXT: add sp, sp, a0
+; RV64IV-NEXT: .cfi_def_cfa sp, 16
 ; RV64IV-NEXT: addi sp, sp, 16
 ; RV64IV-NEXT: ret
 %local0 = alloca
@@ -108,6 +111,7 @@
 ; RV64IV-NEXT: addi a0, sp, 32
 ; RV64IV-NEXT: vl4r.v v8, (a0)
 ; RV64IV-NEXT: addi sp, s0, -48
+; RV64IV-NEXT: .cfi_def_cfa sp, 48
 ; RV64IV-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT: addi sp, sp, 48
@@ -142,6 +146,7 @@
 ; RV64IV-NEXT: addi a0, sp, 64
 ; RV64IV-NEXT: vl8r.v v8, (a0)
 ; RV64IV-NEXT: addi sp, s0, -80
+; RV64IV-NEXT: .cfi_def_cfa sp, 80
 ; RV64IV-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT: addi sp, sp, 80
@@ -174,6 +179,7 @@
 ; RV64IV-NEXT: csrr a0, vlenb
 ; RV64IV-NEXT: slli a0, a0, 2
 ; RV64IV-NEXT: add sp, sp, a0
+; RV64IV-NEXT: .cfi_def_cfa sp, 16
 ; RV64IV-NEXT: addi sp, sp, 16
 ; RV64IV-NEXT: ret
 %local_scalar0 = alloca i32
@@ -223,6 +229,7 @@
 ; RV64IV-NEXT: addi a0, a0, -32
 ; RV64IV-NEXT: vl2r.v v8, (a0)
 ; RV64IV-NEXT: addi sp, s0, -32
+; RV64IV-NEXT: .cfi_def_cfa sp, 32
 ; RV64IV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -277,6 +284,7 @@
 ; RV64IV-NEXT: vl2r.v v8, (a0)
 ; RV64IV-NEXT: lw a0, 120(s1)
 ; RV64IV-NEXT: addi sp, s0, -256
+; RV64IV-NEXT: .cfi_def_cfa sp, 256
 ; RV64IV-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT: ld s1, 232(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
--- a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
@@ -60,6 +60,7 @@
 ; RV64IV-NEXT: vs8r.v v24, (a1)
 ; RV64IV-NEXT: call callee@plt
 ; RV64IV-NEXT: addi sp, s0, -80
+; RV64IV-NEXT: .cfi_def_cfa sp, 80
 ; RV64IV-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
 ; RV64IV-NEXT: addi sp, sp, 80
diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -1252,6 +1252,7 @@
 ; RV64-NEXT: csrr a0, vlenb
 ; RV64-NEXT: slli a0, a0, 3
 ; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
 ; RV64-NEXT: addi sp, sp, 16
 ; RV64-NEXT: ret
 %p0 = call @llvm.vector.insert.nxv8p0.nxv16p0( undef, %ptrs0, i64 0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -1742,6 +1742,7 @@
 ; RV64-NEXT: csrr a0, vlenb
 ; RV64-NEXT: slli a0, a0, 3
 ; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
 ; RV64-NEXT: addi sp, sp, 16
 ; RV64-NEXT: ret
 %p0 = call @llvm.vector.insert.nxv8p0.nxv16p0( undef, %ptrs0, i64 0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
--- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
@@ -1697,6 +1697,7 @@
 ; RV32-BITS-UNKNOWN-NEXT: vl8re64.v v16, (a0)
 ; RV32-BITS-UNKNOWN-NEXT: vl8re64.v v8, (a1)
 ; RV32-BITS-UNKNOWN-NEXT: addi sp, s0, -80
+; RV32-BITS-UNKNOWN-NEXT: .cfi_def_cfa sp, 80
 ; RV32-BITS-UNKNOWN-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
 ; RV32-BITS-UNKNOWN-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
 ; RV32-BITS-UNKNOWN-NEXT: addi sp, sp, 80
@@ -1733,6 +1734,7 @@
 ; RV32-BITS-256-NEXT: vl8re64.v v16, (a0)
 ; RV32-BITS-256-NEXT: vl8re64.v v8, (a1)
 ; RV32-BITS-256-NEXT: addi sp, s0, -80
+; RV32-BITS-256-NEXT: .cfi_def_cfa sp, 80
 ; RV32-BITS-256-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
 ; RV32-BITS-256-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
 ; RV32-BITS-256-NEXT: addi sp, sp, 80
@@ -1769,6 +1771,7 @@
 ; RV32-BITS-512-NEXT: vl8re64.v v16, (a0)
 ; RV32-BITS-512-NEXT: vl8re64.v v8, (a1)
 ; RV32-BITS-512-NEXT: addi sp, s0, -80
+; RV32-BITS-512-NEXT: .cfi_def_cfa sp, 80
 ; RV32-BITS-512-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
 ; RV32-BITS-512-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
 ; RV32-BITS-512-NEXT: addi sp, sp, 80
@@ -1805,6 +1808,7 @@
 ; RV64-BITS-UNKNOWN-NEXT: vl8re64.v v16, (a0)
 ; RV64-BITS-UNKNOWN-NEXT: vl8re64.v v8, (a1)
 ; RV64-BITS-UNKNOWN-NEXT: addi sp, s0, -80
+; RV64-BITS-UNKNOWN-NEXT: .cfi_def_cfa sp, 80
 ; RV64-BITS-UNKNOWN-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64-BITS-UNKNOWN-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
 ; RV64-BITS-UNKNOWN-NEXT: addi sp, sp, 80
@@ -1841,6 +1845,7 @@
 ; RV64-BITS-256-NEXT: vl8re64.v v16, (a0)
 ; RV64-BITS-256-NEXT: vl8re64.v v8, (a1)
 ; RV64-BITS-256-NEXT: addi sp, s0, -80
+; RV64-BITS-256-NEXT: .cfi_def_cfa sp, 80
 ; RV64-BITS-256-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64-BITS-256-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
 ; RV64-BITS-256-NEXT: addi sp, sp, 80
@@ -1877,6 +1882,7 @@
 ; RV64-BITS-512-NEXT: vl8re64.v v16, (a0)
 ; RV64-BITS-512-NEXT: vl8re64.v v8, (a1)
 ; RV64-BITS-512-NEXT: addi sp, s0, -80
+; RV64-BITS-512-NEXT: .cfi_def_cfa sp, 80
 ; RV64-BITS-512-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64-BITS-512-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
 ; RV64-BITS-512-NEXT: addi sp, sp, 80
diff --git a/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll b/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll
--- a/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll
@@ -42,6 +42,7 @@
 ; CHECK-NEXT: addi sp, sp, 32
 ; CHECK-NEXT: li a0, 0
 ; CHECK-NEXT: addi sp, s0, -96
+; CHECK-NEXT: .cfi_def_cfa sp, 96
 ; CHECK-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
 ; CHECK-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
 ; CHECK-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
@@ -47,6 +47,7 @@
 ; CHECK-NEXT: vfadd.vv v8, v10, v8
 ; CHECK-NEXT: vse32.v v8, (s2)
 ; CHECK-NEXT: addi sp, s0, -192
+; CHECK-NEXT: .cfi_def_cfa sp, 192
 ; CHECK-NEXT: ld ra, 184(sp) # 8-byte Folded Reload
 ; CHECK-NEXT: ld s0, 176(sp) # 8-byte Folded Reload
 ; CHECK-NEXT: ld s1, 168(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
@@ -719,6 +719,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.rint.nxv16f64( %va, %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
@@ -790,6 +790,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.round.nxv16f64( %va, %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -790,6 +790,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.roundeven.nxv16f64( %va, %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
@@ -790,6 +790,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.roundtozero.nxv16f64( %va, %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
@@ -51,6 +51,7 @@
 ; CHECK-NEXT: call bar@plt
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: addi sp, s0, -96
+; CHECK-NEXT: .cfi_def_cfa sp, 96
 ; CHECK-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
 ; CHECK-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
 ; CHECK-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -1133,6 +1133,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fcmp.nxv64f16( %va, %vb, metadata !"oeq", %m, i32 %evl)
@@ -2372,6 +2373,7 @@
 ; CHECK-NEXT: li a1, 48
 ; CHECK-NEXT: mul a0, a0, a1
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fcmp.nxv32f64( %va, %vb, metadata !"oeq", %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
@@ -1204,6 +1204,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.icmp.nxv128i8( %va, %vb, metadata !"eq", %m, i32 %evl)
@@ -2430,6 +2431,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.icmp.nxv32i32( %va, %vb, metadata !"eq", %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
@@ -776,6 +776,7 @@
 ; CHECK-RV32-NEXT: csrr a0, vlenb
 ; CHECK-RV32-NEXT: slli a0, a0, 3
 ; CHECK-RV32-NEXT: add sp, sp, a0
+; CHECK-RV32-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-RV32-NEXT: addi sp, sp, 16
 ; CHECK-RV32-NEXT: ret
 ;
@@ -837,6 +838,7 @@
 ; CHECK-RV64-NEXT: csrr a0, vlenb
 ; CHECK-RV64-NEXT: slli a0, a0, 3
 ; CHECK-RV64-NEXT: add sp, sp, a0
+; CHECK-RV64-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-RV64-NEXT: addi sp, sp, 16
 ; CHECK-RV64-NEXT: ret
 call void @llvm.experimental.vp.strided.store.nxv17f64.p0.i32( %v, ptr %ptr, i32 %stride, %mask, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
@@ -151,6 +151,7 @@
 ; CHECK-NEXT: li a1, 24
 ; CHECK-NEXT: mul a0, a0, a1
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %vec = load , ptr %p
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -123,6 +123,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 1
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %retval = call {, } @llvm.experimental.vector.deinterleave2.nxv128i1( %vec)
@@ -247,6 +248,7 @@
 ; CHECK-NEXT: li a1, 40
 ; CHECK-NEXT: mul a0, a0, a1
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %retval = call {, } @llvm.experimental.vector.deinterleave2.nxv16i64( %vec)
@@ -453,6 +455,7 @@
 ; CHECK-NEXT: li a1, 40
 ; CHECK-NEXT: mul a0, a0, a1
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %retval = call {, } @llvm.experimental.vector.deinterleave2.nxv16f64( %vec)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
@@ -140,6 +140,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %res = call @llvm.experimental.vector.interleave2.nxv16i64( %a, %b)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -200,6 +200,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %res = call @llvm.experimental.vector.interleave2.nxv16i64( %a, %b)
@@ -378,6 +379,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %res = call @llvm.experimental.vector.interleave2.nxv16f64( %a, %b)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
@@ -1291,6 +1291,7 @@
 ; CHECK-NEXT: li a1, 40
 ; CHECK-NEXT: mul a0, a0, a1
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fma.nxv16f64( %va, %b, %c, %m, i32 %evl)
@@ -1356,6 +1357,7 @@
 ; CHECK-NEXT: li a1, 24
 ; CHECK-NEXT: mul a0, a0, a1
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
@@ -1291,6 +1291,7 @@
 ; CHECK-NEXT: li a1, 40
 ; CHECK-NEXT: mul a0, a0, a1
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fmuladd.nxv16f64( %va, %b, %c, %m, i32 %evl)
@@ -1356,6 +1357,7 @@
 ; CHECK-NEXT: li a1, 24
 ; CHECK-NEXT: mul a0, a0, a1
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement poison, i1 true, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
@@ -126,6 +126,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptrunc.nxv16f64.nxv16f32( %a, %m, i32 %vl)
@@ -211,6 +212,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.fptrunc.nxv32f64.nxv32f32( %a, %m, i32 %vl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
@@ -405,6 +405,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.merge.nxv128i8( %m, %va, %vb, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
@@ -2082,6 +2082,7 @@
 ; RV64-NEXT: csrr a0, vlenb
 ; RV64-NEXT: slli a0, a0, 3
 ; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
 ; RV64-NEXT: addi sp, sp, 16
 ; RV64-NEXT: ret
 call void @llvm.vp.scatter.nxv16f64.nxv16p0( %val, %ptrs, %m, i32 %evl)
@@ -2162,6 +2163,7 @@
 ; RV64-NEXT: csrr a0, vlenb
 ; RV64-NEXT: slli a0, a0, 4
 ; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
 ; RV64-NEXT: addi sp, sp, 16
 ; RV64-NEXT: ret
 %ptrs = getelementptr inbounds double, ptr %base, %idxs
@@ -2243,6 +2245,7 @@
 ; RV64-NEXT: csrr a0, vlenb
 ; RV64-NEXT: slli a0, a0, 4
 ; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
 ; RV64-NEXT: addi sp, sp, 16
 ; RV64-NEXT: ret
 %eidxs = sext %idxs to
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
@@ -456,6 +456,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 3
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 call void @llvm.vp.store.nxv17f64.p0( %val, * %ptr, %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
@@ -427,6 +427,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %cond = icmp eq %va, zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
@@ -393,6 +393,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.select.nxv32i32( %a, %b, %c, i32 %evl)
@@ -450,6 +451,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %evl = call i32 @llvm.vscale.i32()
@@ -736,6 +738,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.select.nxv16f64( %a, %b, %c, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -356,6 +356,7 @@
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
 ; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %v = call @llvm.vp.trunc.nxv32i32.nxv32i64( %a, %m, i32 %vl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
@@ -63,6 +63,7 @@
 ; CHECK-NEXT: $x10 = frame-destroy PseudoReadVLENB
 ; CHECK-NEXT: $x10 = frame-destroy SLLI killed $x10, 3
 ; CHECK-NEXT: $x2 = frame-destroy ADD $x2, killed $x10
+ ; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $x2, 16
 ; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 16
 ; CHECK-NEXT: PseudoRET
 %0:gpr = COPY $x10