diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/llvm/lib/Target/AArch64/AArch64FrameLowering.h --- a/llvm/lib/Target/AArch64/AArch64FrameLowering.h +++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.h @@ -141,13 +141,12 @@ int64_t assignSVEStackObjectOffsets(MachineFrameInfo &MF, int &MinCSFrameIndex, int &MaxCSFrameIndex) const; - MCCFIInstruction - createDefCFAExpressionFromSP(const TargetRegisterInfo &TRI, - const StackOffset &OffsetFromSP) const; - MCCFIInstruction createCfaOffset(const TargetRegisterInfo &MRI, unsigned DwarfReg, - const StackOffset &OffsetFromDefCFA) const; bool shouldCombineCSRLocalStackBumpInEpilogue(MachineBasicBlock &MBB, unsigned StackBumpBytes) const; + void emitCalleeSavedGPRLocations(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI) const; + void emitCalleeSavedSVELocations(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI) const; }; } // End llvm namespace diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -117,6 +117,72 @@ // // FIXME: also explain the redzone concept. // +// An example of the prologue: +// +// .globl __foo +// .align 2 +// __foo: +// Ltmp0: +// .cfi_startproc +// .cfi_personality 155, ___gxx_personality_v0 +// Leh_func_begin: +// .cfi_lsda 16, Lexception33 +// +// stp xa,bx, [sp, -#offset]! +// ... +// stp x28, x27, [sp, #offset-32] +// stp fp, lr, [sp, #offset-16] +// add fp, sp, #offset - 16 +// sub sp, sp, #1360 +// +// The Stack: +// +-------------------------------------------+ +// 10000 | ........ | ........ | ........ | ........ | +// 10004 | ........ | ........ | ........ | ........ | +// +-------------------------------------------+ +// 10008 | ........ | ........ | ........ | ........ | +// 1000c | ........ | ........ | ........ | ........ 
| +// +===========================================+ +// 10010 | X28 Register | +// 10014 | X28 Register | +// +-------------------------------------------+ +// 10018 | X27 Register | +// 1001c | X27 Register | +// +===========================================+ +// 10020 | Frame Pointer | +// 10024 | Frame Pointer | +// +-------------------------------------------+ +// 10028 | Link Register | +// 1002c | Link Register | +// +===========================================+ +// 10030 | ........ | ........ | ........ | ........ | +// 10034 | ........ | ........ | ........ | ........ | +// +-------------------------------------------+ +// 10038 | ........ | ........ | ........ | ........ | +// 1003c | ........ | ........ | ........ | ........ | +// +-------------------------------------------+ +// +// [sp] = 10030 :: >>initial value<< +// sp = 10020 :: stp fp, lr, [sp, #-16]! +// fp = sp == 10020 :: mov fp, sp +// [sp] == 10020 :: stp x28, x27, [sp, #-16]! +// sp == 10010 :: >>final value<< +// +// The frame pointer (w29) points to address 10020. 
If we use an offset of +// '16' from 'w29', we get the CFI offsets of -8 for w30, -16 for w29, -24 +// for w27, and -32 for w28: +// +// Ltmp1: +// .cfi_def_cfa w29, 16 +// Ltmp2: +// .cfi_offset w30, -8 +// Ltmp3: +// .cfi_offset w29, -16 +// Ltmp4: +// .cfi_offset w27, -24 +// Ltmp5: +// .cfi_offset w28, -32 +// //===----------------------------------------------------------------------===// #include "AArch64FrameLowering.h" @@ -154,7 +220,6 @@ #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/LEB128.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetMachine.h" @@ -440,140 +505,81 @@ return MBB.erase(I); } -// Convenience function to create a DWARF expression for -// Expr + NumBytes + NumVGScaledBytes * AArch64::VG -static void appendVGScaledOffsetExpr(SmallVectorImpl &Expr, - int NumBytes, int NumVGScaledBytes, unsigned VG, - llvm::raw_string_ostream &Comment) { - uint8_t buffer[16]; +void AArch64FrameLowering::emitCalleeSavedGPRLocations( + MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const { + MachineFunction &MF = *MBB.getParent(); + MachineFrameInfo &MFI = MF.getFrameInfo(); - if (NumBytes) { - Expr.push_back(dwarf::DW_OP_consts); - Expr.append(buffer, buffer + encodeSLEB128(NumBytes, buffer)); - Expr.push_back((uint8_t)dwarf::DW_OP_plus); - Comment << (NumBytes < 0 ? 
" - " : " + ") << std::abs(NumBytes); - } + const std::vector &CSI = MFI.getCalleeSavedInfo(); + if (CSI.empty()) + return; - if (NumVGScaledBytes) { - Expr.push_back((uint8_t)dwarf::DW_OP_consts); - Expr.append(buffer, buffer + encodeSLEB128(NumVGScaledBytes, buffer)); + const TargetSubtargetInfo &STI = MF.getSubtarget(); + const TargetRegisterInfo &TRI = *STI.getRegisterInfo(); + const TargetInstrInfo &TII = *STI.getInstrInfo(); + DebugLoc DL = MBB.findDebugLoc(MBBI); - Expr.push_back((uint8_t)dwarf::DW_OP_bregx); - Expr.append(buffer, buffer + encodeULEB128(VG, buffer)); - Expr.push_back(0); + for (const auto &Info : CSI) { + if (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector) + continue; - Expr.push_back((uint8_t)dwarf::DW_OP_mul); - Expr.push_back((uint8_t)dwarf::DW_OP_plus); + assert(!Info.isSpilledToReg() && "Spilling to registers not implemented"); + unsigned DwarfReg = TRI.getDwarfRegNum(Info.getReg(), true); - Comment << (NumVGScaledBytes < 0 ? " - " : " + ") - << std::abs(NumVGScaledBytes) << " * VG"; + int64_t Offset = + MFI.getObjectOffset(Info.getFrameIdx()) - getOffsetOfLocalArea(); + unsigned CFIIndex = MF.addFrameInst( + MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset)); + BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex) + .setMIFlags(MachineInstr::FrameSetup); } } -// Creates an MCCFIInstruction: -// { DW_CFA_def_cfa_expression, ULEB128 (sizeof expr), expr } -MCCFIInstruction AArch64FrameLowering::createDefCFAExpressionFromSP( - const TargetRegisterInfo &TRI, const StackOffset &OffsetFromSP) const { - int64_t NumBytes, NumVGScaledBytes; - AArch64InstrInfo::decomposeStackOffsetForDwarfOffsets(OffsetFromSP, NumBytes, - NumVGScaledBytes); - - std::string CommentBuffer = "sp"; - llvm::raw_string_ostream Comment(CommentBuffer); - - // Build up the expression (SP + NumBytes + NumVGScaledBytes * AArch64::VG) - SmallString<64> Expr; - Expr.push_back((uint8_t)(dwarf::DW_OP_breg0 + /*SP*/ 
31)); - Expr.push_back(0); - appendVGScaledOffsetExpr(Expr, NumBytes, NumVGScaledBytes, - TRI.getDwarfRegNum(AArch64::VG, true), Comment); - - // Wrap this into DW_CFA_def_cfa. - SmallString<64> DefCfaExpr; - DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression); - uint8_t buffer[16]; - DefCfaExpr.append(buffer, - buffer + encodeULEB128(Expr.size(), buffer)); - DefCfaExpr.append(Expr.str()); - return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(), - Comment.str()); -} - -MCCFIInstruction AArch64FrameLowering::createCfaOffset( - const TargetRegisterInfo &TRI, unsigned Reg, - const StackOffset &OffsetFromDefCFA) const { - int64_t NumBytes, NumVGScaledBytes; - AArch64InstrInfo::decomposeStackOffsetForDwarfOffsets( - OffsetFromDefCFA, NumBytes, NumVGScaledBytes); - - unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true); - - // Non-scalable offsets can use DW_CFA_offset directly. - if (!NumVGScaledBytes) - return MCCFIInstruction::createOffset(nullptr, DwarfReg, NumBytes); - - std::string CommentBuffer; - llvm::raw_string_ostream Comment(CommentBuffer); - Comment << printReg(Reg, &TRI) << " @ cfa"; - - // Build up expression (NumBytes + NumVGScaledBytes * AArch64::VG) - SmallString<64> OffsetExpr; - appendVGScaledOffsetExpr(OffsetExpr, NumBytes, NumVGScaledBytes, - TRI.getDwarfRegNum(AArch64::VG, true), Comment); - - // Wrap this into DW_CFA_expression - SmallString<64> CfaExpr; - CfaExpr.push_back(dwarf::DW_CFA_expression); - uint8_t buffer[16]; - CfaExpr.append(buffer, buffer + encodeULEB128(DwarfReg, buffer)); - CfaExpr.append(buffer, buffer + encodeULEB128(OffsetExpr.size(), buffer)); - CfaExpr.append(OffsetExpr.str()); - - return MCCFIInstruction::createEscape(nullptr, CfaExpr.str(), Comment.str()); -} - -void AArch64FrameLowering::emitCalleeSavedFrameMoves( +void AArch64FrameLowering::emitCalleeSavedSVELocations( MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const { MachineFunction &MF = *MBB.getParent(); MachineFrameInfo &MFI = 
MF.getFrameInfo(); - const TargetSubtargetInfo &STI = MF.getSubtarget(); - const TargetRegisterInfo *TRI = STI.getRegisterInfo(); - const TargetInstrInfo *TII = STI.getInstrInfo(); - DebugLoc DL = MBB.findDebugLoc(MBBI); // Add callee saved registers to move list. const std::vector &CSI = MFI.getCalleeSavedInfo(); if (CSI.empty()) return; + const TargetSubtargetInfo &STI = MF.getSubtarget(); + const TargetRegisterInfo &TRI = *STI.getRegisterInfo(); + const TargetInstrInfo &TII = *STI.getInstrInfo(); + DebugLoc DL = MBB.findDebugLoc(MBBI); + AArch64FunctionInfo &AFI = *MF.getInfo(); + for (const auto &Info : CSI) { - Register Reg = Info.getReg(); + if (!(MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector)) + continue; // Not all unwinders may know about SVE registers, so assume the lowest // common demoninator. - unsigned NewReg; - if (static_cast(TRI)->regNeedsCFI(Reg, NewReg)) - Reg = NewReg; - else + assert(!Info.isSpilledToReg() && "Spilling to registers not implemented"); + unsigned Reg = Info.getReg(); + if (!static_cast(TRI).regNeedsCFI(Reg, Reg)) continue; - StackOffset Offset; - if (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector) { - AArch64FunctionInfo *AFI = MF.getInfo(); - Offset = - StackOffset::getScalable(MFI.getObjectOffset(Info.getFrameIdx())) - - StackOffset::getFixed(AFI->getCalleeSavedStackSize(MFI)); - } else { - Offset = StackOffset::getFixed(MFI.getObjectOffset(Info.getFrameIdx()) - - getOffsetOfLocalArea()); - } - unsigned CFIIndex = MF.addFrameInst(createCfaOffset(*TRI, Reg, Offset)); - BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) + StackOffset Offset = + StackOffset::getScalable(MFI.getObjectOffset(Info.getFrameIdx())) - + StackOffset::getFixed(AFI.getCalleeSavedStackSize(MFI)); + + unsigned CFIIndex = MF.addFrameInst(createCFAOffset(TRI, Reg, Offset)); + BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) .addCFIIndex(CFIIndex) .setMIFlags(MachineInstr::FrameSetup); } } 
+void AArch64FrameLowering::emitCalleeSavedFrameMoves( + MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const { + emitCalleeSavedGPRLocations(MBB, MBBI); + emitCalleeSavedSVELocations(MBB, MBBI); +} + // Find a scratch register that we can use at the start of the prologue to // re-align the stack pointer. We avoid using callee-save registers since they // may appear to be free when this is called from canUseAsPrologue (during @@ -881,7 +887,7 @@ static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec( MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc, - bool NeedsWinCFI, bool *HasWinCFI, bool InProlog = true) { + bool NeedsWinCFI, bool *HasWinCFI, bool EmitCFI, bool InProlog = true) { unsigned NewOpc; switch (MBBI->getOpcode()) { default: @@ -940,12 +946,15 @@ // If the first store isn't right where we want SP then we can't fold the // update in so create a normal arithmetic instruction instead. + MachineFunction &MF = *MBB.getParent(); if (MBBI->getOperand(MBBI->getNumOperands() - 1).getImm() != 0 || CSStackSizeInc < MinOffset || CSStackSizeInc > MaxOffset) { emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, StackOffset::getFixed(CSStackSizeInc), TII, InProlog ? MachineInstr::FrameSetup - : MachineInstr::FrameDestroy); + : MachineInstr::FrameDestroy, + false, false, nullptr, EmitCFI && InProlog); + return std::prev(MBBI); } @@ -976,6 +985,14 @@ InProlog ? 
MachineInstr::FrameSetup : MachineInstr::FrameDestroy); } + if (EmitCFI && InProlog) { + unsigned CFIIndex = MF.addFrameInst( + MCCFIInstruction::cfiDefCfaOffset(nullptr, -CSStackSizeInc)); + BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex) + .setMIFlags(MachineInstr::FrameSetup); + } + return std::prev(MBB.erase(MBBI)); } @@ -1280,14 +1297,16 @@ assert(!SVEStackSize && "Cannot combine SP bump with SVE"); emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, StackOffset::getFixed(-NumBytes), TII, - MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI); + MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI, + EmitCFI); NumBytes = 0; } else if (HomPrologEpilog) { // Stack has been already adjusted. NumBytes -= PrologueSaveSize; } else if (PrologueSaveSize != 0) { MBBI = convertCalleeSaveRestoreToSPPrePostIncDec( - MBB, MBBI, DL, TII, -PrologueSaveSize, NeedsWinCFI, &HasWinCFI); + MBB, MBBI, DL, TII, -PrologueSaveSize, NeedsWinCFI, &HasWinCFI, + EmitCFI); NumBytes -= PrologueSaveSize; } assert(NumBytes >= 0 && "Negative stack allocation size!?"); @@ -1341,8 +1360,27 @@ StackOffset::getFixed(FPOffset), TII, MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI); } + if (EmitCFI) { + // Define the current CFA rule to use the provided FP. + const int OffsetToFirstCalleeSaveFromFP = + AFI->getCalleeSaveBaseToFrameRecordOffset() - + AFI->getCalleeSavedStackSize(); + Register FramePtr = RegInfo->getFrameRegister(MF); + unsigned Reg = RegInfo->getDwarfRegNum(FramePtr, true); + unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa( + nullptr, Reg, FixedObject - OffsetToFirstCalleeSaveFromFP)); + BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex) + .setMIFlags(MachineInstr::FrameSetup); + } } + // Now emit the moves for whatever callee saved regs we have (including FP, + // LR if those are saved). 
Frame instructions for SVE register are emitted + // later, after the instruction which actually save SVE regs. + if (EmitCFI) + emitCalleeSavedGPRLocations(MBB, MBBI); + if (windowsRequiresStackProbe(MF, NumBytes)) { uint64_t NumWords = NumBytes >> 4; if (NeedsWinCFI) { @@ -1455,14 +1493,21 @@ } // Allocate space for the callee saves (if any). - emitFrameOffset(MBB, CalleeSavesBegin, DL, AArch64::SP, AArch64::SP, - -AllocateBefore, TII, - MachineInstr::FrameSetup); + emitFrameOffset( + MBB, CalleeSavesBegin, DL, AArch64::SP, AArch64::SP, -AllocateBefore, TII, + MachineInstr::FrameSetup, false, false, nullptr, + EmitCFI && !HasFP && AllocateBefore, + StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes)); + + if (EmitCFI) + emitCalleeSavedSVELocations(MBB, CalleeSavesEnd); // Finally allocate remaining SVE stack space. emitFrameOffset(MBB, CalleeSavesEnd, DL, AArch64::SP, AArch64::SP, - -AllocateAfter, TII, - MachineInstr::FrameSetup); + -AllocateAfter, TII, MachineInstr::FrameSetup, false, false, + nullptr, EmitCFI && !HasFP && AllocateAfter, + AllocateBefore + StackOffset::getFixed( + (int64_t)MFI.getStackSize() - NumBytes)); // Allocate space for the rest of the frame. if (NumBytes) { @@ -1477,14 +1522,17 @@ } // If we're a leaf function, try using the red zone. - if (!canUseRedZone(MF)) + if (!canUseRedZone(MF)) { // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have // the correct value here, as NumBytes also includes padding bytes, // which shouldn't be counted here. 
- emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP, - StackOffset::getFixed(-NumBytes), TII, - MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI); - + emitFrameOffset( + MBB, MBBI, DL, scratchSPReg, AArch64::SP, + StackOffset::getFixed(-NumBytes), TII, MachineInstr::FrameSetup, + false, NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP, + SVEStackSize + + StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes)); + } if (NeedsRealignment) { const unsigned NrBitsToZero = Log2(MFI.getMaxAlign()); assert(NrBitsToZero > 1); @@ -1551,109 +1599,6 @@ MBB.addLiveIn(AArch64::X1); } } - - if (EmitCFI) { - // An example of the prologue: - // - // .globl __foo - // .align 2 - // __foo: - // Ltmp0: - // .cfi_startproc - // .cfi_personality 155, ___gxx_personality_v0 - // Leh_func_begin: - // .cfi_lsda 16, Lexception33 - // - // stp xa,bx, [sp, -#offset]! - // ... - // stp x28, x27, [sp, #offset-32] - // stp fp, lr, [sp, #offset-16] - // add fp, sp, #offset - 16 - // sub sp, sp, #1360 - // - // The Stack: - // +-------------------------------------------+ - // 10000 | ........ | ........ | ........ | ........ | - // 10004 | ........ | ........ | ........ | ........ | - // +-------------------------------------------+ - // 10008 | ........ | ........ | ........ | ........ | - // 1000c | ........ | ........ | ........ | ........ | - // +===========================================+ - // 10010 | X28 Register | - // 10014 | X28 Register | - // +-------------------------------------------+ - // 10018 | X27 Register | - // 1001c | X27 Register | - // +===========================================+ - // 10020 | Frame Pointer | - // 10024 | Frame Pointer | - // +-------------------------------------------+ - // 10028 | Link Register | - // 1002c | Link Register | - // +===========================================+ - // 10030 | ........ | ........ | ........ | ........ | - // 10034 | ........ | ........ | ........ | ........ 
| - // +-------------------------------------------+ - // 10038 | ........ | ........ | ........ | ........ | - // 1003c | ........ | ........ | ........ | ........ | - // +-------------------------------------------+ - // - // [sp] = 10030 :: >>initial value<< - // sp = 10020 :: stp fp, lr, [sp, #-16]! - // fp = sp == 10020 :: mov fp, sp - // [sp] == 10020 :: stp x28, x27, [sp, #-16]! - // sp == 10010 :: >>final value<< - // - // The frame pointer (w29) points to address 10020. If we use an offset of - // '16' from 'w29', we get the CFI offsets of -8 for w30, -16 for w29, -24 - // for w27, and -32 for w28: - // - // Ltmp1: - // .cfi_def_cfa w29, 16 - // Ltmp2: - // .cfi_offset w30, -8 - // Ltmp3: - // .cfi_offset w29, -16 - // Ltmp4: - // .cfi_offset w27, -24 - // Ltmp5: - // .cfi_offset w28, -32 - - if (HasFP) { - const int OffsetToFirstCalleeSaveFromFP = - AFI->getCalleeSaveBaseToFrameRecordOffset() - - AFI->getCalleeSavedStackSize(); - Register FramePtr = RegInfo->getFrameRegister(MF); - - // Define the current CFA rule to use the provided FP. - unsigned Reg = RegInfo->getDwarfRegNum(FramePtr, true); - unsigned CFIIndex = MF.addFrameInst( - MCCFIInstruction::cfiDefCfa(nullptr, Reg, FixedObject - OffsetToFirstCalleeSaveFromFP)); - BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) - .addCFIIndex(CFIIndex) - .setMIFlags(MachineInstr::FrameSetup); - } else { - unsigned CFIIndex; - if (SVEStackSize) { - const TargetSubtargetInfo &STI = MF.getSubtarget(); - const TargetRegisterInfo &TRI = *STI.getRegisterInfo(); - StackOffset TotalSize = - SVEStackSize + StackOffset::getFixed((int64_t)MFI.getStackSize()); - CFIIndex = MF.addFrameInst(createDefCFAExpressionFromSP(TRI, TotalSize)); - } else { - // Encode the stack size of the leaf function. 
- CFIIndex = MF.addFrameInst( - MCCFIInstruction::cfiDefCfaOffset(nullptr, MFI.getStackSize())); - } - BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) - .addCFIIndex(CFIIndex) - .setMIFlags(MachineInstr::FrameSetup); - } - - // Now emit the moves for whatever callee saved regs we have (including FP, - // LR if those are saved). - emitCalleeSavedFrameMoves(MBB, MBBI); - } } static void InsertReturnAddressAuth(MachineFunction &MF, @@ -1806,8 +1751,9 @@ // allocate more stack for arguments (in space that an untimely interrupt // may clobber), convert it to a post-index ldp. if (OffsetOp.getImm() == 0 && AfterCSRPopSize >= 0) - convertCalleeSaveRestoreToSPPrePostIncDec( - MBB, Pop, DL, TII, PrologueSaveSize, NeedsWinCFI, &HasWinCFI, false); + convertCalleeSaveRestoreToSPPrePostIncDec(MBB, Pop, DL, TII, + PrologueSaveSize, NeedsWinCFI, + &HasWinCFI, false, false); else { // If not, make sure to emit an add after the last ldp. // We're doing this by transfering the size to be restored from the diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h @@ -395,6 +395,11 @@ const MachineInstr &UseMI, const TargetRegisterInfo *TRI); +MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg, + unsigned Reg, const StackOffset &Offset); +MCCFIInstruction createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg, + const StackOffset &OffsetFromDefCFA); + /// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg /// plus Offset. 
This is intended to be used from within the prolog/epilog /// insertion (PEI) pass, where a virtual scratch register may be allocated @@ -404,7 +409,9 @@ StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag = MachineInstr::NoFlags, bool SetNZCV = false, bool NeedsWinCFI = false, - bool *HasWinCFI = nullptr); + bool *HasWinCFI = nullptr, bool EmitCFAOffset = false, + StackOffset InitialOffset = {}, + unsigned FrameReg = AArch64::SP); /// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the /// FP. Return false if the offset could not be handled directly in MI, and diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -42,6 +42,7 @@ #include "llvm/Support/CommandLine.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/LEB128.h" #include "llvm/Support/MathExtras.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" @@ -4078,6 +4079,118 @@ } } +// Convenience function to create a DWARF expression for +// Expr + NumBytes + NumVGScaledBytes * AArch64::VG +static void appendVGScaledOffsetExpr(SmallVectorImpl &Expr, int NumBytes, + int NumVGScaledBytes, unsigned VG, + llvm::raw_string_ostream &Comment) { + uint8_t buffer[16]; + + if (NumBytes) { + Expr.push_back(dwarf::DW_OP_consts); + Expr.append(buffer, buffer + encodeSLEB128(NumBytes, buffer)); + Expr.push_back((uint8_t)dwarf::DW_OP_plus); + Comment << (NumBytes < 0 ? 
" - " : " + ") << std::abs(NumBytes); + } + + if (NumVGScaledBytes) { + Expr.push_back((uint8_t)dwarf::DW_OP_consts); + Expr.append(buffer, buffer + encodeSLEB128(NumVGScaledBytes, buffer)); + + Expr.push_back((uint8_t)dwarf::DW_OP_bregx); + Expr.append(buffer, buffer + encodeULEB128(VG, buffer)); + Expr.push_back(0); + + Expr.push_back((uint8_t)dwarf::DW_OP_mul); + Expr.push_back((uint8_t)dwarf::DW_OP_plus); + + Comment << (NumVGScaledBytes < 0 ? " - " : " + ") + << std::abs(NumVGScaledBytes) << " * VG"; + } +} + +// Creates an MCCFIInstruction: +// { DW_CFA_def_cfa_expression, ULEB128 (sizeof expr), expr } +static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI, + unsigned Reg, + const StackOffset &Offset) { + int64_t NumBytes, NumVGScaledBytes; + AArch64InstrInfo::decomposeStackOffsetForDwarfOffsets(Offset, NumBytes, + NumVGScaledBytes); + std::string CommentBuffer; + llvm::raw_string_ostream Comment(CommentBuffer); + + if (Reg == AArch64::SP) + Comment << "sp"; + else if (Reg == AArch64::FP) + Comment << "fp"; + else + Comment << printReg(Reg, &TRI); + + // Build up the expression (Reg + NumBytes + NumVGScaledBytes * AArch64::VG) + SmallString<64> Expr; + unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true); + Expr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfReg)); + Expr.push_back(0); + appendVGScaledOffsetExpr(Expr, NumBytes, NumVGScaledBytes, + TRI.getDwarfRegNum(AArch64::VG, true), Comment); + + // Wrap this into DW_CFA_def_cfa. 
+ SmallString<64> DefCfaExpr; + DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression); + uint8_t buffer[16]; + DefCfaExpr.append(buffer, buffer + encodeULEB128(Expr.size(), buffer)); + DefCfaExpr.append(Expr.str()); + return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(), + Comment.str()); +} + +MCCFIInstruction llvm::createDefCFA(const TargetRegisterInfo &TRI, + unsigned FrameReg, unsigned Reg, + const StackOffset &Offset) { + if (Offset.getScalable()) + return createDefCFAExpression(TRI, Reg, Offset); + + if (FrameReg == Reg) + return MCCFIInstruction::cfiDefCfaOffset(nullptr, int(Offset.getFixed())); + + unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true); + return MCCFIInstruction::cfiDefCfa(nullptr, DwarfReg, (int)Offset.getFixed()); +} + +MCCFIInstruction llvm::createCFAOffset(const TargetRegisterInfo &TRI, + unsigned Reg, + const StackOffset &OffsetFromDefCFA) { + int64_t NumBytes, NumVGScaledBytes; + AArch64InstrInfo::decomposeStackOffsetForDwarfOffsets( + OffsetFromDefCFA, NumBytes, NumVGScaledBytes); + + unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true); + + // Non-scalable offsets can use DW_CFA_offset directly. 
+ if (!NumVGScaledBytes) + return MCCFIInstruction::createOffset(nullptr, DwarfReg, NumBytes); + + std::string CommentBuffer; + llvm::raw_string_ostream Comment(CommentBuffer); + Comment << printReg(Reg, &TRI) << " @ cfa"; + + // Build up expression (NumBytes + NumVGScaledBytes * AArch64::VG) + SmallString<64> OffsetExpr; + appendVGScaledOffsetExpr(OffsetExpr, NumBytes, NumVGScaledBytes, + TRI.getDwarfRegNum(AArch64::VG, true), Comment); + + // Wrap this into DW_CFA_expression + SmallString<64> CfaExpr; + CfaExpr.push_back(dwarf::DW_CFA_expression); + uint8_t buffer[16]; + CfaExpr.append(buffer, buffer + encodeULEB128(DwarfReg, buffer)); + CfaExpr.append(buffer, buffer + encodeULEB128(OffsetExpr.size(), buffer)); + CfaExpr.append(OffsetExpr.str()); + + return MCCFIInstruction::createEscape(nullptr, CfaExpr.str(), Comment.str()); +} + // Helper function to emit a frame offset adjustment from a given // pointer (SrcReg), stored into DestReg. This function is explicit // in that it requires the opcode. @@ -4087,7 +4200,8 @@ unsigned SrcReg, int64_t Offset, unsigned Opc, const TargetInstrInfo *TII, MachineInstr::MIFlag Flag, bool NeedsWinCFI, - bool *HasWinCFI) { + bool *HasWinCFI, bool EmitCFAOffset, + StackOffset CFAOffset, unsigned FrameReg) { int Sign = 1; unsigned MaxEncoding, ShiftSize; switch (Opc) { @@ -4112,6 +4226,13 @@ llvm_unreachable("Unsupported opcode"); } + // `Offset` can be in bytes or in "scalable bytes". + int VScale = 1; + if (Opc == AArch64::ADDVL_XXI) + VScale = 16; + else if (Opc == AArch64::ADDPL_XXI) + VScale = 2; + // FIXME: If the offset won't fit in 24-bits, compute the offset into a // scratch register. If DestReg is a virtual register, use it as the // scratch register; otherwise, create a new virtual register (to be @@ -4149,6 +4270,26 @@ AArch64_AM::getShifterImm(AArch64_AM::LSL, LocalShiftSize)); MBI = MBI.setMIFlag(Flag); + auto Change = + VScale == 1 + ? 
StackOffset::getFixed(ThisVal << LocalShiftSize) + : StackOffset::getScalable(VScale * (ThisVal << LocalShiftSize)); + if (Sign == -1 || Opc == AArch64::SUBXri || Opc == AArch64::SUBSXri) + CFAOffset += Change; + else + CFAOffset -= Change; + if (EmitCFAOffset && DestReg == TmpReg) { + MachineFunction &MF = *MBB.getParent(); + const TargetSubtargetInfo &STI = MF.getSubtarget(); + const TargetRegisterInfo &TRI = *STI.getRegisterInfo(); + + unsigned CFIIndex = + MF.addFrameInst(createDefCFA(TRI, FrameReg, DestReg, CFAOffset)); + BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex) + .setMIFlags(Flag); + } + if (NeedsWinCFI) { assert(Sign == 1 && "SEH directives should always have a positive sign"); int Imm = (int)(ThisVal << LocalShiftSize); @@ -4185,7 +4326,9 @@ unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag Flag, bool SetNZCV, - bool NeedsWinCFI, bool *HasWinCFI) { + bool NeedsWinCFI, bool *HasWinCFI, + bool EmitCFAOffset, StackOffset CFAOffset, + unsigned FrameReg) { int64_t Bytes, NumPredicateVectors, NumDataVectors; AArch64InstrInfo::decomposeStackOffsetForFrameOffsets( Offset, Bytes, NumPredicateVectors, NumDataVectors); @@ -4200,8 +4343,13 @@ Opc = SetNZCV ? AArch64::SUBSXri : AArch64::SUBXri; } emitFrameOffsetAdj(MBB, MBBI, DL, DestReg, SrcReg, Bytes, Opc, TII, Flag, - NeedsWinCFI, HasWinCFI); + NeedsWinCFI, HasWinCFI, EmitCFAOffset, CFAOffset, + FrameReg); + CFAOffset += (Opc == AArch64::ADDXri || Opc == AArch64::ADDSXri) + ? 
StackOffset::getFixed(-Bytes) + : StackOffset::getFixed(Bytes); SrcReg = DestReg; + FrameReg = DestReg; } assert(!(SetNZCV && (NumPredicateVectors || NumDataVectors)) && @@ -4211,14 +4359,17 @@ if (NumDataVectors) { emitFrameOffsetAdj(MBB, MBBI, DL, DestReg, SrcReg, NumDataVectors, - AArch64::ADDVL_XXI, TII, Flag, NeedsWinCFI, nullptr); + AArch64::ADDVL_XXI, TII, Flag, NeedsWinCFI, nullptr, + EmitCFAOffset, CFAOffset, FrameReg); + CFAOffset += StackOffset::getScalable(-NumDataVectors * 16); SrcReg = DestReg; } if (NumPredicateVectors) { assert(DestReg != AArch64::SP && "Unaligned access to SP"); emitFrameOffsetAdj(MBB, MBBI, DL, DestReg, SrcReg, NumPredicateVectors, - AArch64::ADDPL_XXI, TII, Flag, NeedsWinCFI, nullptr); + AArch64::ADDPL_XXI, TII, Flag, NeedsWinCFI, nullptr, + EmitCFAOffset, CFAOffset, FrameReg); } } diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll b/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll --- a/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll @@ -7,8 +7,8 @@ ; CHECK-LABEL: call_byval_i32: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: ldr w8, [x0] ; CHECK-NEXT: str w8, [sp] @@ -26,6 +26,7 @@ ; CHECK-LABEL: call_byval_a64i32: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #288 +; CHECK-NEXT: .cfi_def_cfa_offset 288 ; CHECK-NEXT: stp x29, x30, [sp, #256] // 16-byte Folded Spill ; CHECK-NEXT: str x28, [sp, #272] // 8-byte Folded Spill ; CHECK-NEXT: add x29, sp, #256 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll --- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll +++ 
b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll @@ -31,13 +31,13 @@ ; CHECK-LABEL: test_musttail_variadic_spill: ; CHECK: ; %bb.0: ; CHECK-NEXT: sub sp, sp, #224 +; CHECK-NEXT: .cfi_def_cfa_offset 224 ; CHECK-NEXT: stp x28, x27, [sp, #128] ; 16-byte Folded Spill ; CHECK-NEXT: stp x26, x25, [sp, #144] ; 16-byte Folded Spill ; CHECK-NEXT: stp x24, x23, [sp, #160] ; 16-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #176] ; 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #192] ; 16-byte Folded Spill ; CHECK-NEXT: stp x29, x30, [sp, #208] ; 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 224 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: .cfi_offset w19, -24 @@ -103,13 +103,13 @@ ; CHECK-LABEL: f_thunk: ; CHECK: ; %bb.0: ; CHECK-NEXT: sub sp, sp, #256 +; CHECK-NEXT: .cfi_def_cfa_offset 256 ; CHECK-NEXT: stp x28, x27, [sp, #160] ; 16-byte Folded Spill ; CHECK-NEXT: stp x26, x25, [sp, #176] ; 16-byte Folded Spill ; CHECK-NEXT: stp x24, x23, [sp, #192] ; 16-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #208] ; 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #224] ; 16-byte Folded Spill ; CHECK-NEXT: stp x29, x30, [sp, #240] ; 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 256 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: .cfi_offset w19, -24 diff --git a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll --- a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll @@ -99,9 +99,9 @@ ; CHECK: .cfi_startproc ; Check that used callee-saved registers are saved ; CHECK: sub sp, sp, #32 +; CHECK: .cfi_def_cfa_offset 32 ; CHECK: stp x30, x19, [sp, #16] ; Check correctness of cfi pseudo-instructions -; CHECK: .cfi_def_cfa_offset 32 ; CHECK: .cfi_offset w19, -8 ; CHECK: .cfi_offset w30, -16 ; Check correct access 
to arguments passed on the stack, through stack pointer @@ -118,6 +118,7 @@ ; CHECK-MACHO: .cfi_startproc ; Check that used callee-saved registers are saved ; CHECK-MACHO: sub sp, sp, #48 +; CHECK-MACHO: .cfi_def_cfa_offset 48 ; CHECK-MACHO: stp x20, x19, [sp, #16] ; Check that the frame pointer is created: ; CHECK-MACHO: stp x29, x30, [sp, #32] @@ -181,17 +182,18 @@ ; CHECK: .cfi_startproc ; Check that used callee-saved registers are saved ; CHECK: stp x29, x30, [sp, #-32]! +; CHECK: .cfi_def_cfa_offset 32 ; Check that the frame pointer is created: ; CHECK: str x19, [sp, #16] ; CHECK: mov x29, sp -; Check the dynamic realignment of the stack pointer to a 128-byte boundary -; CHECK: sub x9, sp, #96 -; CHECK: and sp, x9, #0xffffffffffffff80 ; Check correctness of cfi pseudo-instructions ; CHECK: .cfi_def_cfa w29, 32 ; CHECK: .cfi_offset w19, -16 ; CHECK: .cfi_offset w30, -24 ; CHECK: .cfi_offset w29, -32 +; Check the dynamic realignment of the stack pointer to a 128-byte boundary +; CHECK: sub x9, sp, #96 +; CHECK: and sp, x9, #0xffffffffffffff80 ; Check correct access to arguments passed on the stack, through frame pointer ; CHECK: ldr d[[DARG:[0-9]+]], [x29, #56] ; CHECK: ldr w[[IARG:[0-9]+]], [x29, #40] @@ -209,18 +211,19 @@ ; CHECK-MACHO: .cfi_startproc ; Check that used callee-saved registers are saved ; CHECK-MACHO: stp x20, x19, [sp, #-32]!
+; CHECK-MACHO: .cfi_def_cfa_offset 32 ; Check that the frame pointer is created: ; CHECK-MACHO: stp x29, x30, [sp, #16] ; CHECK-MACHO: add x29, sp, #16 -; Check the dynamic realignment of the stack pointer to a 128-byte boundary -; CHECK-MACHO: sub x9, sp, #96 -; CHECK-MACHO: and sp, x9, #0xffffffffffffff80 ; Check correctness of cfi pseudo-instructions ; CHECK-MACHO: .cfi_def_cfa w29, 16 ; CHECK-MACHO: .cfi_offset w30, -8 ; CHECK-MACHO: .cfi_offset w29, -16 ; CHECK-MACHO: .cfi_offset w19, -24 ; CHECK-MACHO: .cfi_offset w20, -32 +; Check the dynamic realignment of the stack pointer to a 128-byte boundary +; CHECK-MACHO: sub x9, sp, #96 +; CHECK-MACHO: and sp, x9, #0xffffffffffffff80 ; Check correct access to arguments passed on the stack, through frame pointer ; CHECK-MACHO: ldr d[[DARG:[0-9]+]], [x29, #32] ; CHECK-MACHO: ldr w[[IARG:[0-9]+]], [x29, #20] @@ -285,18 +288,19 @@ ; CHECK: .cfi_startproc ; Check that used callee-saved registers are saved ; CHECK: stp x29, x30, [sp, #-32]! +; CHECK: .cfi_def_cfa_offset 32 ; Check that the frame pointer is created: ; CHECK: stp x20, x19, [sp, #16] ; CHECK: mov x29, sp -; Check that space is reserved on the stack for the local variable, -; rounded up to a multiple of 16 to keep the stack pointer 16-byte aligned. -; CHECK: sub sp, sp, #16 ; Check correctness of cfi pseudo-instructions ; CHECK: .cfi_def_cfa w29, 32 ; CHECK: .cfi_offset w19, -8 ; CHECK: .cfi_offset w20, -16 ; CHECK: .cfi_offset w30, -24 ; CHECK: .cfi_offset w29, -32 +; Check that space is reserved on the stack for the local variable, +; rounded up to a multiple of 16 to keep the stack pointer 16-byte aligned. +; CHECK: sub sp, sp, #16 ; Check correct access to arguments passed on the stack, through frame pointer ; CHECK: ldr w[[IARG:[0-9]+]], [x29, #40] ; CHECK: ldr d[[DARG:[0-9]+]], [x29, #56] @@ -386,17 +390,11 @@ ; CHECK: .cfi_startproc ; Check that used callee-saved registers are saved ; CHECK: stp x29, x30, [sp, #-48]! 
+; CHECK: .cfi_def_cfa_offset 48 ; CHECK: str x21, [sp, #16] ; CHECK: stp x20, x19, [sp, #32] ; Check that the frame pointer is created: ; CHECK: mov x29, sp -; Check that the stack pointer gets re-aligned to 128 -; bytes & the base pointer (x19) gets initialized to -; this 128-byte aligned area for local variables & -; spill slots -; CHECK: sub x9, sp, #80 -; CHECK: and sp, x9, #0xffffffffffffff80 -; CHECK: mov x19, sp ; Check correctness of cfi pseudo-instructions ; CHECK: .cfi_def_cfa w29, 48 ; CHECK: .cfi_offset w19, -8 @@ -404,6 +402,13 @@ ; CHECK: .cfi_offset w21, -32 ; CHECK: .cfi_offset w30, -40 ; CHECK: .cfi_offset w29, -48 +; Check that the stack pointer gets re-aligned to 128 +; bytes & the base pointer (x19) gets initialized to +; this 128-byte aligned area for local variables & +; spill slots +; CHECK: sub x9, sp, #80 +; CHECK: and sp, x9, #0xffffffffffffff80 +; CHECK: mov x19, sp ; Check correct access to arguments passed on the stack, through frame pointer ; CHECK: ldr w[[IARG:[0-9]+]], [x29, #56] ; CHECK: ldr d[[DARG:[0-9]+]], [x29, #72] @@ -432,17 +437,11 @@ ; CHECK-MACHO: .cfi_startproc ; Check that used callee-saved registers are saved ; CHECK-MACHO: stp x22, x21, [sp, #-48]! 
+; CHECK-MACHO: .cfi_def_cfa_offset 48 ; CHECK-MACHO: stp x20, x19, [sp, #16] ; Check that the frame pointer is created: ; CHECK-MACHO: stp x29, x30, [sp, #32] ; CHECK-MACHO: add x29, sp, #32 -; Check that the stack pointer gets re-aligned to 128 -; bytes & the base pointer (x19) gets initialized to -; this 128-byte aligned area for local variables & -; spill slots -; CHECK-MACHO: sub x9, sp, #80 -; CHECK-MACHO: and sp, x9, #0xffffffffffffff80 -; CHECK-MACHO: mov x19, sp ; Check correctness of cfi pseudo-instructions ; CHECK-MACHO: .cfi_def_cfa w29, 16 ; CHECK-MACHO: .cfi_offset w30, -8 @@ -451,6 +450,13 @@ ; CHECK-MACHO: .cfi_offset w20, -32 ; CHECK-MACHO: .cfi_offset w21, -40 ; CHECK-MACHO: .cfi_offset w22, -48 +; Check that the stack pointer gets re-aligned to 128 +; bytes & the base pointer (x19) gets initialized to +; this 128-byte aligned area for local variables & +; spill slots +; CHECK-MACHO: sub x9, sp, #80 +; CHECK-MACHO: and sp, x9, #0xffffffffffffff80 +; CHECK-MACHO: mov x19, sp ; Check correct access to arguments passed on the stack, through frame pointer ; CHECK-MACHO: ldr w[[IARG:[0-9]+]], [x29, #20] ; CHECK-MACHO: ldr d[[DARG:[0-9]+]], [x29, #32] diff --git a/llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll b/llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll --- a/llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll @@ -9,34 +9,35 @@ define void @consecutive() { ; CHECK-MOPS-LABEL: consecutive: ; CHECK-MOPS: // %bb.0: // %entry -; CHECK-MOPS-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill -; CHECK-MOPS-NEXT: sub sp, sp, #2016 -; CHECK-MOPS-NEXT: .cfi_def_cfa_offset 2032 +; CHECK-MOPS-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill +; CHECK-MOPS-NEXT: .cfi_def_cfa_offset 16 ; CHECK-MOPS-NEXT: .cfi_offset w30, -8 ; CHECK-MOPS-NEXT: .cfi_offset w29, -16 -; CHECK-MOPS-NEXT: mov w8, #1000 -; CHECK-MOPS-NEXT: add x9, sp, #8 -; CHECK-MOPS-NEXT: adrp x10, .LCPI0_0 -; CHECK-MOPS-NEXT: adrp x11, .LCPI0_1 -; CHECK-MOPS-NEXT: mov w12, #6424 -; CHECK-MOPS-NEXT: mov w13, #7452 -; CHECK-MOPS-NEXT: setp [x9]!, x8!, xzr -; CHECK-MOPS-NEXT: setm [x9]!, x8!, xzr -; CHECK-MOPS-NEXT: sete [x9]!, x8!, xzr -; CHECK-MOPS-NEXT: movk w12, #6938, lsl #16 -; CHECK-MOPS-NEXT: ldr q0, [x10, :lo12:.LCPI0_0] -; CHECK-MOPS-NEXT: mov w8, #30 -; CHECK-MOPS-NEXT: ldr d1, [x11, :lo12:.LCPI0_1] -; CHECK-MOPS-NEXT: add x0, sp, #1008 -; CHECK-MOPS-NEXT: add x1, sp, #8 -; CHECK-MOPS-NEXT: str w12, [sp, #1032] -; CHECK-MOPS-NEXT: strh w13, [sp, #1036] -; CHECK-MOPS-NEXT: str q0, [sp, #1008] -; CHECK-MOPS-NEXT: str d1, [sp, #1024] -; CHECK-MOPS-NEXT: strb w8, [sp, #1038] -; CHECK-MOPS-NEXT: bl fn -; CHECK-MOPS-NEXT: add sp, sp, #2016 -; CHECK-MOPS-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; CHECK-MOPS-NEXT: sub sp, sp, #2016 +; CHECK-MOPS-NEXT: .cfi_def_cfa_offset 2032 +; CHECK-MOPS-NEXT: mov w8, #1000 +; CHECK-MOPS-NEXT: add x9, sp, #8 +; CHECK-MOPS-NEXT: adrp x10, .LCPI0_0 +; CHECK-MOPS-NEXT: adrp x11, .LCPI0_1 +; CHECK-MOPS-NEXT: mov w12, #6424 +; CHECK-MOPS-NEXT: mov w13, #7452 +; CHECK-MOPS-NEXT: setp [x9]!, x8!, xzr +; CHECK-MOPS-NEXT: setm [x9]!, x8!, xzr +; CHECK-MOPS-NEXT: sete [x9]!, x8!, xzr +; CHECK-MOPS-NEXT: movk w12, #6938, lsl #16 +; CHECK-MOPS-NEXT: ldr q0, [x10, :lo12:.LCPI0_0] +; CHECK-MOPS-NEXT: mov w8, #30 +; CHECK-MOPS-NEXT: ldr d1, [x11, :lo12:.LCPI0_1] +; CHECK-MOPS-NEXT: add x0, sp, #1008 +; CHECK-MOPS-NEXT: add x1, sp, #8 +; CHECK-MOPS-NEXT: str w12, [sp, #1032] +; CHECK-MOPS-NEXT: strh w13, [sp, #1036] +; CHECK-MOPS-NEXT: str q0, [sp, #1008] +; CHECK-MOPS-NEXT: str d1, [sp, #1024] +; CHECK-MOPS-NEXT: strb w8, [sp, #1038] +; CHECK-MOPS-NEXT: bl fn +; CHECK-MOPS-NEXT: add 
sp, sp, #2016 +; CHECK-MOPS-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload ; CHECK-MOPS-NEXT: ret entry: %buf_from = alloca [1000 x i8], align 16 diff --git a/llvm/test/CodeGen/AArch64/aarch64-mops.ll b/llvm/test/CodeGen/AArch64/aarch64-mops.ll --- a/llvm/test/CodeGen/AArch64/aarch64-mops.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-mops.ll @@ -690,8 +690,8 @@ ; GISel-WITHOUT-MOPS-O0-LABEL: memset_size: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: sub sp, sp, #32 -; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; GISel-WITHOUT-MOPS-O0-NEXT: .cfi_def_cfa_offset 32 +; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; GISel-WITHOUT-MOPS-O0-NEXT: .cfi_offset w30, -16 ; GISel-WITHOUT-MOPS-O0-NEXT: str x1, [sp, #8] // 8-byte Folded Spill ; GISel-WITHOUT-MOPS-O0-NEXT: mov w1, w2 @@ -759,8 +759,8 @@ ; GISel-WITHOUT-MOPS-O0-LABEL: memset_size_volatile: ; GISel-WITHOUT-MOPS-O0: // %bb.0: // %entry ; GISel-WITHOUT-MOPS-O0-NEXT: sub sp, sp, #32 -; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; GISel-WITHOUT-MOPS-O0-NEXT: .cfi_def_cfa_offset 32 +; GISel-WITHOUT-MOPS-O0-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; GISel-WITHOUT-MOPS-O0-NEXT: .cfi_offset w30, -16 ; GISel-WITHOUT-MOPS-O0-NEXT: str x1, [sp, #8] // 8-byte Folded Spill ; GISel-WITHOUT-MOPS-O0-NEXT: mov w1, w2 diff --git a/llvm/test/CodeGen/AArch64/active_lane_mask.ll b/llvm/test/CodeGen/AArch64/active_lane_mask.ll --- a/llvm/test/CodeGen/AArch64/active_lane_mask.ll +++ b/llvm/test/CodeGen/AArch64/active_lane_mask.ll @@ -155,11 +155,10 @@ ; CHECK-LABEL: lane_mask_nxv32i1_i32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: index z0.s, #0, #1 ; CHECK-NEXT: mov z3.s, w0 ; CHECK-NEXT: mov z1.d, z0.d @@ -179,6 +178,7 @@ ; CHECK-NEXT: cmphi p3.s, p0/z, z4.s, z5.s ; CHECK-NEXT: uqadd z5.s, z6.s, z3.s ; CHECK-NEXT: incw z1.s, all, mul #4 +; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: cmphi p4.s, p0/z, z4.s, z5.s ; CHECK-NEXT: uqadd z0.s, z0.s, z3.s ; CHECK-NEXT: uqadd z1.s, z1.s, z3.s @@ -190,6 +190,7 @@ ; CHECK-NEXT: cmphi p4.s, p0/z, z4.s, z1.s ; CHECK-NEXT: uqadd z0.s, z2.s, z3.s ; CHECK-NEXT: uqadd z1.s, z6.s, z3.s +; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: cmphi p5.s, p0/z, z4.s, z0.s ; CHECK-NEXT: cmphi p0.s, p0/z, z4.s, z1.s ; CHECK-NEXT: uzp1 p3.h, p3.h, p4.h @@ -209,13 +210,10 @@ ; CHECK-LABEL: lane_mask_nxv32i1_i64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: index z0.d, #0, #1 ; CHECK-NEXT: mov z3.d, x0 ; CHECK-NEXT: mov z1.d, z0.d @@ -241,13 +239,16 @@ ; CHECK-NEXT: uqadd z6.d, z7.d, z3.d ; CHECK-NEXT: mov z25.d, z2.d ; CHECK-NEXT: incd z24.d, all, mul #4 +; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: mov z26.d, z5.d ; CHECK-NEXT: cmphi p4.d, p0/z, z4.d, z6.d ; CHECK-NEXT: uqadd z6.d, z24.d, z3.d ; CHECK-NEXT: incd z25.d, all, mul #4 +; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: cmphi p5.d, p0/z, z4.d, z6.d ; CHECK-NEXT: uqadd z6.d, z25.d, z3.d ; CHECK-NEXT: incd z26.d, all, mul #4 +; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: cmphi p6.d, p0/z, z4.d, z6.d ; CHECK-NEXT: uqadd z6.d, z26.d, z3.d ; CHECK-NEXT: uzp1 p2.s, p2.s, p3.s @@ -280,6 +281,7 @@ ; CHECK-NEXT: cmphi p6.d, p0/z, z4.d, z1.d ; CHECK-NEXT: uqadd z0.d, z25.d, z3.d ; CHECK-NEXT: uqadd z1.d, z26.d, z3.d +; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: cmphi p7.d, p0/z, z4.d, z0.d ; CHECK-NEXT: cmphi p0.d, p0/z, z4.d, z1.d ; CHECK-NEXT: uzp1 p5.s, p5.s, p6.s diff --git a/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll b/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll --- a/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll +++ b/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll @@ -50,8 +50,8 @@ ; CHECK-LABEL: vec_add_const_add_const_extrause: ; CHECK: // %bb.0: ; CHECK-NEXT: 
sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v1.4s, #8 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill @@ -127,8 +127,8 @@ ; CHECK-LABEL: vec_add_const_sub_const_extrause: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v1.4s, #8 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill @@ -206,8 +206,8 @@ ; CHECK-LABEL: vec_add_const_const_sub_extrause: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v1.4s, #8 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill @@ -283,8 +283,8 @@ ; CHECK-LABEL: vec_sub_const_add_const_extrause: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v1.4s, #8 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill @@ -360,8 +360,8 @@ ; CHECK-LABEL: vec_sub_const_sub_const_extrause: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v1.4s, #8 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill @@ -439,8 +439,8 @@ ; CHECK-LABEL: vec_sub_const_const_sub_extrause: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; 
CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v1.4s, #8 ; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s @@ -519,8 +519,8 @@ ; CHECK-LABEL: vec_const_sub_add_const_extrause: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v1.4s, #8 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill @@ -599,8 +599,8 @@ ; CHECK-LABEL: vec_const_sub_sub_const_extrause: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v1.4s, #8 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill @@ -678,8 +678,8 @@ ; CHECK-LABEL: vec_const_sub_const_sub_extrause: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v1.4s, #8 ; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s diff --git a/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll b/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll --- a/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll +++ b/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll @@ -452,8 +452,8 @@ ; CHECK-LABEL: caller_in_memory: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #96 -; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 96 +; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: add x8, sp, #8 ; CHECK-NEXT: bl return_in_memory @@ -496,8 +496,8 @@ ; CHECK-LABEL: 
argument_in_memory: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #96 -; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 96 +; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: adrp x8, in_memory_store ; CHECK-NEXT: add x8, x8, :lo12:in_memory_store diff --git a/llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll b/llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll --- a/llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll +++ b/llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll @@ -72,6 +72,7 @@ ; CHECK-SAVED-X18: str x18, [sp ; CHECK-SAVED-ALL: str x18, [sp +; CHECK-SAVED-ALL-NEXT: .cfi_def_cfa_offset ; CHECK-SAVED-ALL-NEXT: stp x15, x14, [sp ; CHECK-SAVED-ALL-NEXT: stp x13, x12, [sp ; CHECK-SAVED-ALL-NEXT: stp x11, x10, [sp diff --git a/llvm/test/CodeGen/AArch64/arm64-fp128.ll b/llvm/test/CodeGen/AArch64/arm64-fp128.ll --- a/llvm/test/CodeGen/AArch64/arm64-fp128.ll +++ b/llvm/test/CodeGen/AArch64/arm64-fp128.ll @@ -75,8 +75,8 @@ ; CHECK-LABEL: test_fptosi: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: adrp x8, lhs ; CHECK-NEXT: ldr q0, [x8, :lo12:lhs] @@ -106,8 +106,8 @@ ; CHECK-LABEL: test_fptoui: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: adrp x8, lhs ; CHECK-NEXT: ldr q0, [x8, :lo12:lhs] @@ -247,8 +247,8 @@ ; CHECK-LABEL: test_setcc3: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #48 -; CHECK-NEXT: stp x30, x19, [sp, #32] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 48 +; CHECK-NEXT: stp x30, x19, [sp, #32] // 16-byte Folded Spill ; 
CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: adrp x8, lhs @@ -336,8 +336,8 @@ ; CHECK-LABEL: test_round: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: adrp x8, lhs ; CHECK-NEXT: ldr q0, [x8, :lo12:lhs] diff --git a/llvm/test/CodeGen/AArch64/arm64-large-frame.ll b/llvm/test/CodeGen/AArch64/arm64-large-frame.ll --- a/llvm/test/CodeGen/AArch64/arm64-large-frame.ll +++ b/llvm/test/CodeGen/AArch64/arm64-large-frame.ll @@ -11,12 +11,14 @@ %var2 = alloca i8, i32 16 %var3 = alloca i8, i32 20000000 -; CHECK: sub sp, sp, #4095, lsl #12 -; CHECK: sub sp, sp, #4095, lsl #12 -; CHECK: sub sp, sp, #1575, lsl #12 -; CHECK: sub sp, sp, #2576 -; CHECK: .cfi_def_cfa_offset 40000032 - +; CHECK: sub sp, sp, #4095, lsl #12 // =16773120 +; CHECK-NEXT: .cfi_def_cfa_offset 16773136 +; CHECK-NEXT: sub sp, sp, #4095, lsl #12 // =16773120 +; CHECK-NEXT: .cfi_def_cfa_offset 33546256 +; CHECK-NEXT: sub sp, sp, #1575, lsl #12 // =6451200 +; CHECK-NEXT: .cfi_def_cfa_offset 39997456 +; CHECK-NEXT: sub sp, sp, #2576 +; CHECK-NEXT: .cfi_def_cfa_offset 40000032 ; CHECK: add [[TMP:x[0-9]+]], sp, #4095, lsl #12 ; CHECK: add [[TMP1:x[0-9]+]], [[TMP]], #787, lsl #12 @@ -52,8 +54,11 @@ %var1 = alloca i8, i32 1000000 %var2 = alloca i8, i32 16 %var3 = alloca i8, i32 1000000 -; CHECK: sub sp, sp, #488, lsl #12 + +; CHECK: sub sp, sp, #488, lsl #12 // =1998848 +; CHECK-NEXT: .cfi_def_cfa_offset 1998864 ; CHECK-NEXT: sub sp, sp, #1168 +; CHECK-NEXT: .cfi_def_cfa_offset 2000032 store volatile i8* %var1, i8** @addr ; CHECK: add [[VAR1ADDR:x[0-9]+]], sp, #244, lsl #12 diff --git a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll --- a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll +++ 
b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll @@ -98,8 +98,8 @@ ; CHECK-LABEL: bzero_12_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: mov x0, sp ; CHECK-NEXT: str wzr, [sp, #8] @@ -119,8 +119,8 @@ ; CHECK-LABEL: bzero_16_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: stp xzr, x30, [sp, #8] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: stp xzr, x30, [sp, #8] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: mov x0, sp ; CHECK-NEXT: str xzr, [sp] @@ -139,8 +139,8 @@ ; CHECK-LABEL: bzero_20_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #48 -; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 48 +; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: add x0, sp, #8 ; CHECK-NEXT: stp xzr, xzr, [sp, #8] @@ -160,8 +160,8 @@ ; CHECK-LABEL: bzero_26_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #48 -; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 48 +; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: mov x0, sp ; CHECK-NEXT: stp xzr, xzr, [sp] @@ -182,8 +182,8 @@ ; CHECK-LABEL: bzero_32_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #48 -; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 48 +; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: mov x0, sp @@ -203,8 +203,8 @@ ; CHECK-LABEL: bzero_40_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #64 -; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: 
str x30, [sp, #48] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: mov x0, sp @@ -225,8 +225,8 @@ ; CHECK-LABEL: bzero_64_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #80 -; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 80 +; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: mov x0, sp @@ -247,8 +247,8 @@ ; CHECK-LABEL: bzero_72_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #96 -; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 96 +; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: mov x0, sp @@ -270,8 +270,8 @@ ; CHECK-LABEL: bzero_128_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #144 -; CHECK-NEXT: str x30, [sp, #128] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 144 +; CHECK-NEXT: str x30, [sp, #128] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: mov x0, sp @@ -294,8 +294,8 @@ ; CHECK-LABEL: bzero_256_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #272 -; CHECK-NEXT: stp x29, x30, [sp, #256] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 272 +; CHECK-NEXT: stp x29, x30, [sp, #256] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: movi v0.2d, #0000000000000000 @@ -360,8 +360,8 @@ ; CHECK-LABEL: memset_12_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: mov x8, #-6148914691236517206 ; CHECK-NEXT: mov x0, sp @@ -403,8 +403,8 @@ ; CHECK-LABEL: 
memset_20_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #48 -; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 48 +; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: mov x8, #-6148914691236517206 ; CHECK-NEXT: add x0, sp, #8 @@ -425,8 +425,8 @@ ; CHECK-LABEL: memset_26_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #48 -; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 48 +; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: mov x8, #-6148914691236517206 ; CHECK-NEXT: mov x0, sp @@ -448,8 +448,8 @@ ; CHECK-LABEL: memset_32_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #48 -; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 48 +; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v0.16b, #170 ; CHECK-NEXT: mov x0, sp @@ -469,8 +469,8 @@ ; CHECK-LABEL: memset_40_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #64 -; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v0.16b, #170 ; CHECK-NEXT: mov x8, #-6148914691236517206 @@ -492,8 +492,8 @@ ; CHECK-LABEL: memset_64_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #80 -; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 80 +; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v0.16b, #170 ; CHECK-NEXT: mov x0, sp @@ -514,8 +514,8 @@ ; CHECK-LABEL: memset_72_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #96 -; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 96 +; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill ; CHECK-NEXT: 
.cfi_offset w30, -16 ; CHECK-NEXT: movi v0.16b, #170 ; CHECK-NEXT: mov x8, #-6148914691236517206 @@ -538,8 +538,8 @@ ; CHECK-LABEL: memset_128_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #144 -; CHECK-NEXT: str x30, [sp, #128] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 144 +; CHECK-NEXT: str x30, [sp, #128] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v0.16b, #170 ; CHECK-NEXT: mov x0, sp @@ -562,8 +562,8 @@ ; CHECK-LABEL: memset_256_stack: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #272 -; CHECK-NEXT: stp x29, x30, [sp, #256] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 272 +; CHECK-NEXT: stp x29, x30, [sp, #256] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: movi v0.16b, #170 diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll b/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll --- a/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll +++ b/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll @@ -807,10 +807,10 @@ ; CHECK-LABEL: srem16x8: ; CHECK: // %bb.0: ; CHECK-NEXT: stp x26, x25, [sp, #-64]! // 16-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 64 ; CHECK-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 64 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -1171,10 +1171,10 @@ ; CHECK-LABEL: urem16x8: ; CHECK: // %bb.0: ; CHECK-NEXT: stp x26, x25, [sp, #-64]! 
// 16-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 64 ; CHECK-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 64 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -1469,8 +1469,8 @@ ; CHECK-LABEL: frem2f32: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #64 -; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 @@ -1498,8 +1498,8 @@ ; CHECK-LABEL: frem4f32: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #64 -; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: stp q0, q1, [sp, #16] // 32-byte Folded Spill ; CHECK-NEXT: mov s0, v0.s[1] @@ -1554,8 +1554,8 @@ ; CHECK-LABEL: frem2d64: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #64 -; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: stp q0, q1, [sp] // 32-byte Folded Spill ; CHECK-NEXT: mov d0, v0.d[1] diff --git a/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll b/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll --- a/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll +++ b/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll @@ -27,6 +27,7 @@ ; ; CHECK-LABEL: caller_meta_leaf ; CHECK: sub sp, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: stp x29, x30, [sp, #32] ; CHECK-NEXT: add x29, sp, #32 ; CHECK: Ltmp diff --git a/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll 
b/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll --- a/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll +++ b/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll @@ -13,6 +13,7 @@ ; ENABLE-NEXT: b.ge LBB0_2 ; ENABLE-NEXT: ; %bb.1: ; %true ; ENABLE-NEXT: sub sp, sp, #32 +; ENABLE-NEXT: .cfi_def_cfa_offset 32 ; ENABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; ENABLE-NEXT: add x29, sp, #16 ; ENABLE-NEXT: .cfi_def_cfa w29, 16 @@ -30,6 +31,7 @@ ; DISABLE-LABEL: foo: ; DISABLE: ; %bb.0: ; DISABLE-NEXT: sub sp, sp, #32 +; DISABLE-NEXT: .cfi_def_cfa_offset 32 ; DISABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; DISABLE-NEXT: add x29, sp, #16 ; DISABLE-NEXT: .cfi_def_cfa w29, 16 @@ -72,6 +74,7 @@ ; ENABLE-NEXT: cbz w0, LBB1_4 ; ENABLE-NEXT: ; %bb.1: ; %for.body.preheader ; ENABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill +; ENABLE-NEXT: .cfi_def_cfa_offset 32 ; ENABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; ENABLE-NEXT: add x29, sp, #16 ; ENABLE-NEXT: .cfi_def_cfa w29, 16 @@ -99,6 +102,7 @@ ; DISABLE-LABEL: freqSaveAndRestoreOutsideLoop: ; DISABLE: ; %bb.0: ; %entry ; DISABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill +; DISABLE-NEXT: .cfi_def_cfa_offset 32 ; DISABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; DISABLE-NEXT: add x29, sp, #16 ; DISABLE-NEXT: .cfi_def_cfa w29, 16 @@ -159,6 +163,7 @@ ; ENABLE-LABEL: freqSaveAndRestoreOutsideLoop2: ; ENABLE: ; %bb.0: ; %entry ; ENABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill +; ENABLE-NEXT: .cfi_def_cfa_offset 32 ; ENABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; ENABLE-NEXT: add x29, sp, #16 ; ENABLE-NEXT: .cfi_def_cfa w29, 16 @@ -183,6 +188,7 @@ ; DISABLE-LABEL: freqSaveAndRestoreOutsideLoop2: ; DISABLE: ; %bb.0: ; %entry ; DISABLE-NEXT: stp x20, x19, [sp, #-32]! 
; 16-byte Folded Spill +; DISABLE-NEXT: .cfi_def_cfa_offset 32 ; DISABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; DISABLE-NEXT: add x29, sp, #16 ; DISABLE-NEXT: .cfi_def_cfa w29, 16 @@ -227,6 +233,7 @@ ; ENABLE-NEXT: cbz w0, LBB3_4 ; ENABLE-NEXT: ; %bb.1: ; %for.body.preheader ; ENABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill +; ENABLE-NEXT: .cfi_def_cfa_offset 32 ; ENABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; ENABLE-NEXT: add x29, sp, #16 ; ENABLE-NEXT: .cfi_def_cfa w29, 16 @@ -255,6 +262,7 @@ ; DISABLE-LABEL: loopInfoSaveOutsideLoop: ; DISABLE: ; %bb.0: ; %entry ; DISABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill +; DISABLE-NEXT: .cfi_def_cfa_offset 32 ; DISABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; DISABLE-NEXT: add x29, sp, #16 ; DISABLE-NEXT: .cfi_def_cfa w29, 16 @@ -313,14 +321,20 @@ ; Check with a more complex case that we do not have restore within the loop and ; save outside. -define i32 @loopInfoRestoreOutsideLoop(i32 %cond, i32 %N) nounwind { +define i32 @loopInfoRestoreOutsideLoop(i32 %cond, i32 %N) nounwind uwtable { ; ENABLE-LABEL: loopInfoRestoreOutsideLoop: ; ENABLE: ; %bb.0: ; %entry ; ENABLE-NEXT: cbz w0, LBB4_4 ; ENABLE-NEXT: ; %bb.1: ; %if.then ; ENABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill +; ENABLE-NEXT: .cfi_def_cfa_offset 32 ; ENABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; ENABLE-NEXT: add x29, sp, #16 +; ENABLE-NEXT: .cfi_def_cfa w29, 16 +; ENABLE-NEXT: .cfi_offset w30, -8 +; ENABLE-NEXT: .cfi_offset w29, -16 +; ENABLE-NEXT: .cfi_offset w19, -24 +; ENABLE-NEXT: .cfi_offset w20, -32 ; ENABLE-NEXT: bl _somethingElse ; ENABLE-NEXT: mov w19, wzr ; ENABLE-NEXT: mov w20, #10 @@ -342,8 +356,14 @@ ; DISABLE-LABEL: loopInfoRestoreOutsideLoop: ; DISABLE: ; %bb.0: ; %entry ; DISABLE-NEXT: stp x20, x19, [sp, #-32]! 
; 16-byte Folded Spill +; DISABLE-NEXT: .cfi_def_cfa_offset 32 ; DISABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; DISABLE-NEXT: add x29, sp, #16 +; DISABLE-NEXT: .cfi_def_cfa w29, 16 +; DISABLE-NEXT: .cfi_offset w30, -8 +; DISABLE-NEXT: .cfi_offset w29, -16 +; DISABLE-NEXT: .cfi_offset w19, -24 +; DISABLE-NEXT: .cfi_offset w20, -32 ; DISABLE-NEXT: cbz w0, LBB4_4 ; DISABLE-NEXT: ; %bb.1: ; %if.then ; DISABLE-NEXT: bl _somethingElse @@ -410,12 +430,13 @@ } ; Check that we handle variadic function correctly. -define i32 @variadicFunc(i32 %cond, i32 %count, ...) nounwind { +define i32 @variadicFunc(i32 %cond, i32 %count, ...) nounwind uwtable { ; ENABLE-LABEL: variadicFunc: ; ENABLE: ; %bb.0: ; %entry ; ENABLE-NEXT: cbz w0, LBB6_4 ; ENABLE-NEXT: ; %bb.1: ; %if.then ; ENABLE-NEXT: sub sp, sp, #16 +; ENABLE-NEXT: .cfi_def_cfa_offset 16 ; ENABLE-NEXT: add x8, sp, #16 ; ENABLE-NEXT: cmp w1, #1 ; ENABLE-NEXT: str x8, [sp, #8] @@ -440,6 +461,7 @@ ; DISABLE-LABEL: variadicFunc: ; DISABLE: ; %bb.0: ; %entry ; DISABLE-NEXT: sub sp, sp, #16 +; DISABLE-NEXT: .cfi_def_cfa_offset 16 ; DISABLE-NEXT: cbz w0, LBB6_4 ; DISABLE-NEXT: ; %bb.1: ; %if.then ; DISABLE-NEXT: add x8, sp, #16 @@ -579,6 +601,7 @@ ; ENABLE-NEXT: cbz w0, LBB8_2 ; ENABLE-NEXT: ; %bb.1: ; %if.then ; ENABLE-NEXT: sub sp, sp, #64 +; ENABLE-NEXT: .cfi_def_cfa_offset 64 ; ENABLE-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill ; ENABLE-NEXT: add x29, sp, #48 ; ENABLE-NEXT: .cfi_def_cfa w29, 16 @@ -600,6 +623,7 @@ ; DISABLE-LABEL: callVariadicFunc: ; DISABLE: ; %bb.0: ; %entry ; DISABLE-NEXT: sub sp, sp, #64 +; DISABLE-NEXT: .cfi_def_cfa_offset 64 ; DISABLE-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill ; DISABLE-NEXT: add x29, sp, #48 ; DISABLE-NEXT: .cfi_def_cfa w29, 16 @@ -654,6 +678,7 @@ ; ENABLE-NEXT: ret ; ENABLE-NEXT: LBB9_2: ; %if.abort ; ENABLE-NEXT: stp x29, x30, [sp, #-16]! 
; 16-byte Folded Spill +; ENABLE-NEXT: .cfi_def_cfa_offset 16 ; ENABLE-NEXT: mov x29, sp ; ENABLE-NEXT: .cfi_def_cfa w29, 16 ; ENABLE-NEXT: .cfi_offset w30, -8 @@ -663,6 +688,7 @@ ; DISABLE-LABEL: noreturn: ; DISABLE: ; %bb.0: ; %entry ; DISABLE-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill +; DISABLE-NEXT: .cfi_def_cfa_offset 16 ; DISABLE-NEXT: mov x29, sp ; DISABLE-NEXT: .cfi_def_cfa w29, 16 ; DISABLE-NEXT: .cfi_offset w30, -8 @@ -702,6 +728,7 @@ ; ENABLE-LABEL: infiniteloop: ; ENABLE: ; %bb.0: ; %entry ; ENABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill +; ENABLE-NEXT: .cfi_def_cfa_offset 32 ; ENABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; ENABLE-NEXT: add x29, sp, #16 ; ENABLE-NEXT: .cfi_def_cfa w29, 16 @@ -729,6 +756,7 @@ ; DISABLE-LABEL: infiniteloop: ; DISABLE: ; %bb.0: ; %entry ; DISABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill +; DISABLE-NEXT: .cfi_def_cfa_offset 32 ; DISABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; DISABLE-NEXT: add x29, sp, #16 ; DISABLE-NEXT: .cfi_def_cfa w29, 16 @@ -775,6 +803,7 @@ ; ENABLE-LABEL: infiniteloop2: ; ENABLE: ; %bb.0: ; %entry ; ENABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill +; ENABLE-NEXT: .cfi_def_cfa_offset 32 ; ENABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; ENABLE-NEXT: add x29, sp, #16 ; ENABLE-NEXT: .cfi_def_cfa w29, 16 @@ -808,6 +837,7 @@ ; DISABLE-LABEL: infiniteloop2: ; DISABLE: ; %bb.0: ; %entry ; DISABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill +; DISABLE-NEXT: .cfi_def_cfa_offset 32 ; DISABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; DISABLE-NEXT: add x29, sp, #16 ; DISABLE-NEXT: .cfi_def_cfa w29, 16 @@ -946,12 +976,13 @@ ; ENABLE-LABEL: stack_realign: ; ENABLE: ; %bb.0: ; ENABLE-NEXT: stp x29, x30, [sp, #-16]! 
; 16-byte Folded Spill +; ENABLE-NEXT: .cfi_def_cfa_offset 16 ; ENABLE-NEXT: mov x29, sp -; ENABLE-NEXT: sub x9, sp, #16 -; ENABLE-NEXT: and sp, x9, #0xffffffffffffffe0 ; ENABLE-NEXT: .cfi_def_cfa w29, 16 ; ENABLE-NEXT: .cfi_offset w30, -8 ; ENABLE-NEXT: .cfi_offset w29, -16 +; ENABLE-NEXT: sub x9, sp, #16 +; ENABLE-NEXT: and sp, x9, #0xffffffffffffffe0 ; ENABLE-NEXT: lsl w8, w0, w1 ; ENABLE-NEXT: lsl w9, w1, w0 ; ENABLE-NEXT: cmp w0, w1 @@ -968,12 +999,13 @@ ; DISABLE-LABEL: stack_realign: ; DISABLE: ; %bb.0: ; DISABLE-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill +; DISABLE-NEXT: .cfi_def_cfa_offset 16 ; DISABLE-NEXT: mov x29, sp -; DISABLE-NEXT: sub x9, sp, #16 -; DISABLE-NEXT: and sp, x9, #0xffffffffffffffe0 ; DISABLE-NEXT: .cfi_def_cfa w29, 16 ; DISABLE-NEXT: .cfi_offset w30, -8 ; DISABLE-NEXT: .cfi_offset w29, -16 +; DISABLE-NEXT: sub x9, sp, #16 +; DISABLE-NEXT: and sp, x9, #0xffffffffffffffe0 ; DISABLE-NEXT: lsl w8, w0, w1 ; DISABLE-NEXT: lsl w9, w1, w0 ; DISABLE-NEXT: cmp w0, w1 @@ -1013,14 +1045,13 @@ ; ENABLE-LABEL: stack_realign2: ; ENABLE: ; %bb.0: ; ENABLE-NEXT: stp x28, x27, [sp, #-96]! 
; 16-byte Folded Spill +; ENABLE-NEXT: .cfi_def_cfa_offset 96 ; ENABLE-NEXT: stp x26, x25, [sp, #16] ; 16-byte Folded Spill ; ENABLE-NEXT: stp x24, x23, [sp, #32] ; 16-byte Folded Spill ; ENABLE-NEXT: stp x22, x21, [sp, #48] ; 16-byte Folded Spill ; ENABLE-NEXT: stp x20, x19, [sp, #64] ; 16-byte Folded Spill ; ENABLE-NEXT: stp x29, x30, [sp, #80] ; 16-byte Folded Spill ; ENABLE-NEXT: add x29, sp, #80 -; ENABLE-NEXT: sub x9, sp, #32 -; ENABLE-NEXT: and sp, x9, #0xffffffffffffffe0 ; ENABLE-NEXT: .cfi_def_cfa w29, 16 ; ENABLE-NEXT: .cfi_offset w30, -8 ; ENABLE-NEXT: .cfi_offset w29, -16 @@ -1034,6 +1065,8 @@ ; ENABLE-NEXT: .cfi_offset w26, -80 ; ENABLE-NEXT: .cfi_offset w27, -88 ; ENABLE-NEXT: .cfi_offset w28, -96 +; ENABLE-NEXT: sub x9, sp, #32 +; ENABLE-NEXT: and sp, x9, #0xffffffffffffffe0 ; ENABLE-NEXT: add w8, w1, w0 ; ENABLE-NEXT: lsl w9, w0, w1 ; ENABLE-NEXT: lsl w10, w1, w0 @@ -1072,14 +1105,13 @@ ; DISABLE-LABEL: stack_realign2: ; DISABLE: ; %bb.0: ; DISABLE-NEXT: stp x28, x27, [sp, #-96]! 
; 16-byte Folded Spill +; DISABLE-NEXT: .cfi_def_cfa_offset 96 ; DISABLE-NEXT: stp x26, x25, [sp, #16] ; 16-byte Folded Spill ; DISABLE-NEXT: stp x24, x23, [sp, #32] ; 16-byte Folded Spill ; DISABLE-NEXT: stp x22, x21, [sp, #48] ; 16-byte Folded Spill ; DISABLE-NEXT: stp x20, x19, [sp, #64] ; 16-byte Folded Spill ; DISABLE-NEXT: stp x29, x30, [sp, #80] ; 16-byte Folded Spill ; DISABLE-NEXT: add x29, sp, #80 -; DISABLE-NEXT: sub x9, sp, #32 -; DISABLE-NEXT: and sp, x9, #0xffffffffffffffe0 ; DISABLE-NEXT: .cfi_def_cfa w29, 16 ; DISABLE-NEXT: .cfi_offset w30, -8 ; DISABLE-NEXT: .cfi_offset w29, -16 @@ -1093,6 +1125,8 @@ ; DISABLE-NEXT: .cfi_offset w26, -80 ; DISABLE-NEXT: .cfi_offset w27, -88 ; DISABLE-NEXT: .cfi_offset w28, -96 +; DISABLE-NEXT: sub x9, sp, #32 +; DISABLE-NEXT: and sp, x9, #0xffffffffffffffe0 ; DISABLE-NEXT: add w8, w1, w0 ; DISABLE-NEXT: lsl w9, w0, w1 ; DISABLE-NEXT: lsl w10, w1, w0 diff --git a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll --- a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll +++ b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll @@ -173,8 +173,8 @@ ; CHECK-LABEL: sign_4xi32_multi_use: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: movi v1.2d, #0xffffffffffffffff ; CHECK-NEXT: cmlt v2.4s, v0.4s, #0 diff --git a/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll b/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll --- a/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll +++ b/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll @@ -25,8 +25,8 @@ ; OUTLINE-ATOMICS-LABEL: test_return: ; OUTLINE-ATOMICS: ; %bb.0: ; OUTLINE-ATOMICS-NEXT: stp x20, x19, [sp, #-32]! 
; 16-byte Folded Spill -; OUTLINE-ATOMICS-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; OUTLINE-ATOMICS-NEXT: .cfi_def_cfa_offset 32 +; OUTLINE-ATOMICS-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; OUTLINE-ATOMICS-NEXT: .cfi_offset w30, -8 ; OUTLINE-ATOMICS-NEXT: .cfi_offset w29, -16 ; OUTLINE-ATOMICS-NEXT: .cfi_offset w19, -24 @@ -75,8 +75,8 @@ ; OUTLINE-ATOMICS-LABEL: test_return_bool: ; OUTLINE-ATOMICS: ; %bb.0: ; OUTLINE-ATOMICS-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill -; OUTLINE-ATOMICS-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; OUTLINE-ATOMICS-NEXT: .cfi_def_cfa_offset 32 +; OUTLINE-ATOMICS-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; OUTLINE-ATOMICS-NEXT: .cfi_offset w30, -8 ; OUTLINE-ATOMICS-NEXT: .cfi_offset w29, -16 ; OUTLINE-ATOMICS-NEXT: .cfi_offset w19, -24 @@ -120,8 +120,8 @@ ; OUTLINE-ATOMICS-LABEL: test_conditional: ; OUTLINE-ATOMICS: ; %bb.0: ; OUTLINE-ATOMICS-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill -; OUTLINE-ATOMICS-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; OUTLINE-ATOMICS-NEXT: .cfi_def_cfa_offset 32 +; OUTLINE-ATOMICS-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; OUTLINE-ATOMICS-NEXT: .cfi_offset w30, -8 ; OUTLINE-ATOMICS-NEXT: .cfi_offset w29, -16 ; OUTLINE-ATOMICS-NEXT: .cfi_offset w19, -24 @@ -166,9 +166,9 @@ ; CHECK-LABEL: test_conditional2: ; CHECK: ; %bb.0: ; %entry ; CHECK-NEXT: stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: stp x20, x19, [sp, #16] ; 16-byte Folded Spill ; CHECK-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: .cfi_offset w19, -24 @@ -222,9 +222,9 @@ ; OUTLINE-ATOMICS-LABEL: test_conditional2: ; OUTLINE-ATOMICS: ; %bb.0: ; %entry ; OUTLINE-ATOMICS-NEXT: stp x22, x21, [sp, #-48]! 
; 16-byte Folded Spill +; OUTLINE-ATOMICS-NEXT: .cfi_def_cfa_offset 48 ; OUTLINE-ATOMICS-NEXT: stp x20, x19, [sp, #16] ; 16-byte Folded Spill ; OUTLINE-ATOMICS-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill -; OUTLINE-ATOMICS-NEXT: .cfi_def_cfa_offset 48 ; OUTLINE-ATOMICS-NEXT: .cfi_offset w30, -8 ; OUTLINE-ATOMICS-NEXT: .cfi_offset w29, -16 ; OUTLINE-ATOMICS-NEXT: .cfi_offset w19, -24 diff --git a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll --- a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll +++ b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll @@ -416,9 +416,9 @@ ; CHECK-LABEL: combine_non_adjacent_cmp_br: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x30, [sp, #-48]! // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -474,8 +474,8 @@ ; CHECK-LABEL: do_nothing_if_resultant_opcodes_would_differ: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -642,8 +642,8 @@ ; CHECK-LABEL: fcmpri: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str d8, [sp, #-32]! 
// 8-byte Folded Spill -; CHECK-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: .cfi_offset b8, -32 diff --git a/llvm/test/CodeGen/AArch64/csr-split.ll b/llvm/test/CodeGen/AArch64/csr-split.ll --- a/llvm/test/CodeGen/AArch64/csr-split.ll +++ b/llvm/test/CodeGen/AArch64/csr-split.ll @@ -30,8 +30,8 @@ ; CHECK-APPLE-LABEL: test1: ; CHECK-APPLE: ; %bb.0: ; %entry ; CHECK-APPLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill -; CHECK-APPLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 32 +; CHECK-APPLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; CHECK-APPLE-NEXT: .cfi_offset w30, -8 ; CHECK-APPLE-NEXT: .cfi_offset w29, -16 ; CHECK-APPLE-NEXT: .cfi_offset w19, -24 @@ -102,8 +102,8 @@ ; CHECK-APPLE-LABEL: test2: ; CHECK-APPLE: ; %bb.0: ; %entry ; CHECK-APPLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill -; CHECK-APPLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 32 +; CHECK-APPLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; CHECK-APPLE-NEXT: .cfi_offset w30, -8 ; CHECK-APPLE-NEXT: .cfi_offset w29, -16 ; CHECK-APPLE-NEXT: .cfi_offset w19, -24 @@ -155,8 +155,8 @@ ; CHECK-LABEL: test3: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -176,8 +176,8 @@ ; CHECK-APPLE-LABEL: test3: ; CHECK-APPLE: ; %bb.0: ; %entry ; CHECK-APPLE-NEXT: stp x20, x19, [sp, #-32]! 
; 16-byte Folded Spill -; CHECK-APPLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 32 +; CHECK-APPLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; CHECK-APPLE-NEXT: .cfi_offset w30, -8 ; CHECK-APPLE-NEXT: .cfi_offset w29, -16 ; CHECK-APPLE-NEXT: .cfi_offset w19, -24 diff --git a/llvm/test/CodeGen/AArch64/fastcc.ll b/llvm/test/CodeGen/AArch64/fastcc.ll --- a/llvm/test/CodeGen/AArch64/fastcc.ll +++ b/llvm/test/CodeGen/AArch64/fastcc.ll @@ -13,6 +13,7 @@ ; CHECK-TAIL-LABEL: func_stack0: ; CHECK-TAIL: sub sp, sp, #48 +; CHECK-TAIL-NEXT: .cfi_def_cfa_offset 48 ; CHECK-TAIL-NEXT: stp x29, x30, [sp, #32] ; CHECK-TAIL-NEXT: add x29, sp, #32 ; CHECK-TAIL: str w{{[0-9]+}}, [sp] @@ -189,6 +190,7 @@ define fastcc void @func_stack32_leaf_local([8 x i64], i128 %stacked0, i128 %stacked1) { ; CHECK-LABEL: func_stack32_leaf_local: ; CHECK: sub sp, sp, #32 +; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: str x20, [sp, #16] ; CHECK: nop ; CHECK-NEXT: //NO_APP @@ -198,6 +200,7 @@ ; CHECK-TAIL-LABEL: func_stack32_leaf_local: ; CHECK-TAIL: sub sp, sp, #32 +; CHECK-TAIL-NEXT: .cfi_def_cfa_offset 32 ; CHECK-TAIL-NEXT: str x20, [sp, #16] ; CHECK-TAIL: nop ; CHECK-TAIL-NEXT: //NO_APP diff --git a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll --- a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll +++ b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll @@ -353,9 +353,9 @@ ; CHECK-LABEL: utest_f64i64: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -390,9 +390,9 @@ ; CHECK-LABEL: ustest_f64i64: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 
48 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -465,9 +465,9 @@ ; CHECK-LABEL: utest_f32i64: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -503,9 +503,9 @@ ; CHECK-LABEL: ustest_f32i64: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -591,9 +591,9 @@ ; CHECK-LABEL: utesth_f16i64: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -629,9 +629,9 @@ ; CHECK-LABEL: ustest_f16i64: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -1002,9 +1002,9 @@ ; CHECK-LABEL: utest_f64i64_mm: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: sub sp, sp, 
#48 +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -1042,9 +1042,9 @@ ; CHECK-LABEL: ustest_f64i64_mm: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -1107,9 +1107,9 @@ ; CHECK-LABEL: utest_f32i64_mm: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -1148,9 +1148,9 @@ ; CHECK-LABEL: ustest_f32i64_mm: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -1226,9 +1226,9 @@ ; CHECK-LABEL: utesth_f16i64_mm: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -1267,9 +1267,9 @@ ; CHECK-LABEL: 
ustest_f16i64_mm: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 diff --git a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll --- a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll +++ b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll @@ -248,8 +248,8 @@ ; CHECK-LABEL: test_signed_v1f128_v1i32: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: adrp x8, .LCPI14_0 @@ -286,10 +286,10 @@ ; CHECK-LABEL: test_signed_v2f128_v2i32: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #112 +; CHECK-NEXT: .cfi_def_cfa_offset 112 ; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 112 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -355,10 +355,10 @@ ; CHECK-LABEL: test_signed_v3f128_v3i32: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #128 +; CHECK-NEXT: .cfi_def_cfa_offset 128 ; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #96] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 128 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -445,10 +445,10 @@ ; CHECK-LABEL: test_signed_v4f128_v4i32: ; CHECK: // %bb.0: ; 
CHECK-NEXT: sub sp, sp, #144 +; CHECK-NEXT: .cfi_def_cfa_offset 144 ; CHECK-NEXT: str x30, [sp, #96] // 8-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #112] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #128] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 144 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -808,12 +808,12 @@ ; CHECK-LABEL: test_signed_v2f32_v2i100: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #80 +; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill ; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -874,12 +874,12 @@ ; CHECK-LABEL: test_signed_v2f32_v2i128: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #80 +; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill ; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -1080,6 +1080,7 @@ ; CHECK-LABEL: test_signed_v4f32_v4i100: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #128 +; CHECK-NEXT: .cfi_def_cfa_offset 128 ; CHECK-NEXT: str d10, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: stp d9, d8, [sp, #40] // 16-byte Folded Spill ; CHECK-NEXT: str x30, [sp, #56] // 8-byte Folded Spill @@ -1087,7 +1088,6 @@ ; CHECK-NEXT: stp x24, x23, [sp, #80] // 16-byte Folded Spill ; CHECK-NEXT: stp x22, 
x21, [sp, #96] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 128 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -1184,6 +1184,7 @@ ; CHECK-LABEL: test_signed_v4f32_v4i128: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #128 +; CHECK-NEXT: .cfi_def_cfa_offset 128 ; CHECK-NEXT: str d10, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: stp d9, d8, [sp, #40] // 16-byte Folded Spill ; CHECK-NEXT: str x30, [sp, #56] // 8-byte Folded Spill @@ -1191,7 +1192,6 @@ ; CHECK-NEXT: stp x24, x23, [sp, #80] // 16-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #96] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 128 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -1466,12 +1466,12 @@ ; CHECK-LABEL: test_signed_v2f64_v2i100: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #80 +; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill ; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -1532,12 +1532,12 @@ ; CHECK-LABEL: test_signed_v2f64_v2i128: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #80 +; CHECK-NEXT: .cfi_def_cfa_offset 80 ; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill ; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 80 ; 
CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -1832,6 +1832,7 @@ ; CHECK-LABEL: test_signed_v4f16_v4i100: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #112 +; CHECK-NEXT: .cfi_def_cfa_offset 112 ; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill ; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill @@ -1839,7 +1840,6 @@ ; CHECK-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 112 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -1939,6 +1939,7 @@ ; CHECK-LABEL: test_signed_v4f16_v4i128: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #112 +; CHECK-NEXT: .cfi_def_cfa_offset 112 ; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill ; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill @@ -1946,7 +1947,6 @@ ; CHECK-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 112 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -2580,6 +2580,7 @@ ; CHECK-LABEL: test_signed_v8f16_v8i100: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #192 +; CHECK-NEXT: .cfi_def_cfa_offset 192 ; CHECK-NEXT: str d10, [sp, #64] // 8-byte Folded Spill ; CHECK-NEXT: stp d9, d8, [sp, #80] // 16-byte Folded Spill ; CHECK-NEXT: stp x29, x30, [sp, #96] // 16-byte Folded Spill @@ -2588,7 +2589,6 @@ ; CHECK-NEXT: stp x24, x23, [sp, #144] // 16-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #160] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #176] // 16-byte Folded Spill -; 
CHECK-NEXT: .cfi_def_cfa_offset 192 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -2789,6 +2789,7 @@ ; CHECK-LABEL: test_signed_v8f16_v8i128: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #192 +; CHECK-NEXT: .cfi_def_cfa_offset 192 ; CHECK-NEXT: str d10, [sp, #64] // 8-byte Folded Spill ; CHECK-NEXT: stp d9, d8, [sp, #80] // 16-byte Folded Spill ; CHECK-NEXT: stp x29, x30, [sp, #96] // 16-byte Folded Spill @@ -2797,7 +2798,6 @@ ; CHECK-NEXT: stp x24, x23, [sp, #144] // 16-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #160] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #176] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 192 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 diff --git a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll --- a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll +++ b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll @@ -248,8 +248,8 @@ ; CHECK-LABEL: test_unsigned_v1f128_v1i32: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: adrp x8, .LCPI14_0 @@ -279,9 +279,9 @@ ; CHECK-LABEL: test_unsigned_v2f128_v2i32: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #96 +; CHECK-NEXT: .cfi_def_cfa_offset 96 ; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 96 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -332,9 +332,9 @@ ; CHECK-LABEL: test_unsigned_v3f128_v3i32: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #112 +; CHECK-NEXT: .cfi_def_cfa_offset 112 ; CHECK-NEXT: str 
x30, [sp, #80] // 8-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 112 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -400,9 +400,9 @@ ; CHECK-LABEL: test_unsigned_v4f128_v4i32: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #128 +; CHECK-NEXT: .cfi_def_cfa_offset 128 ; CHECK-NEXT: str x30, [sp, #96] // 8-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 128 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -722,10 +722,10 @@ ; CHECK-LABEL: test_unsigned_v2f32_v2i100: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: .cfi_def_cfa_offset 64 ; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: stp x30, x21, [sp, #32] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 64 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -774,10 +774,10 @@ ; CHECK-LABEL: test_unsigned_v2f32_v2i128: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: .cfi_def_cfa_offset 64 ; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 64 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -947,12 +947,12 @@ ; CHECK-LABEL: test_unsigned_v4f32_v4i100: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #112 +; CHECK-NEXT: .cfi_def_cfa_offset 112 ; CHECK-NEXT: stp d9, d8, [sp, #32] // 16-byte Folded Spill ; CHECK-NEXT: stp x30, x25, [sp, #48] // 16-byte Folded Spill ; CHECK-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #80] // 16-byte 
Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 112 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -1031,12 +1031,12 @@ ; CHECK-LABEL: test_unsigned_v4f32_v4i128: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #112 +; CHECK-NEXT: .cfi_def_cfa_offset 112 ; CHECK-NEXT: stp d9, d8, [sp, #32] // 16-byte Folded Spill ; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill ; CHECK-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 112 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -1262,10 +1262,10 @@ ; CHECK-LABEL: test_unsigned_v2f64_v2i100: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: .cfi_def_cfa_offset 64 ; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: stp x30, x21, [sp, #32] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 64 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -1313,10 +1313,10 @@ ; CHECK-LABEL: test_unsigned_v2f64_v2i128: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: .cfi_def_cfa_offset 64 ; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 64 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -1564,12 +1564,12 @@ ; CHECK-LABEL: test_unsigned_v4f16_v4i100: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #96 +; CHECK-NEXT: .cfi_def_cfa_offset 96 ; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill ; 
CHECK-NEXT: stp x30, x25, [sp, #32] // 16-byte Folded Spill ; CHECK-NEXT: stp x24, x23, [sp, #48] // 16-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #64] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 96 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -1651,12 +1651,12 @@ ; CHECK-LABEL: test_unsigned_v4f16_v4i128: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #96 +; CHECK-NEXT: .cfi_def_cfa_offset 96 ; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: stp x24, x23, [sp, #48] // 16-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #64] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 96 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -2162,6 +2162,7 @@ ; CHECK-LABEL: test_unsigned_v8f16_v8i100: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #176 +; CHECK-NEXT: .cfi_def_cfa_offset 176 ; CHECK-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill ; CHECK-NEXT: stp x29, x30, [sp, #80] // 16-byte Folded Spill ; CHECK-NEXT: stp x28, x27, [sp, #96] // 16-byte Folded Spill @@ -2169,7 +2170,6 @@ ; CHECK-NEXT: stp x24, x23, [sp, #128] // 16-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #144] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #160] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 176 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -2339,6 +2339,7 @@ ; CHECK-LABEL: test_unsigned_v8f16_v8i128: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #176 +; CHECK-NEXT: .cfi_def_cfa_offset 176 ; CHECK-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill ; CHECK-NEXT: stp x29, x30, [sp, #80] // 16-byte Folded Spill ; CHECK-NEXT: stp x28, x27, [sp, #96] // 
16-byte Folded Spill @@ -2346,7 +2347,6 @@ ; CHECK-NEXT: stp x24, x23, [sp, #128] // 16-byte Folded Spill ; CHECK-NEXT: stp x22, x21, [sp, #144] // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #160] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 176 ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir --- a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir +++ b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir @@ -6,13 +6,15 @@ ; CHECK-LABEL: fix_restorepoint_p4: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill + ; CHECK-NEXT: .cfi_def_cfa_offset 16 + ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 + ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill + ; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG - ; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG - ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: // implicit-def: $z8 ; CHECK-NEXT: // implicit-def: $p4 ; CHECK-NEXT: addvl sp, sp, #1 diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-sve.mir --- a/llvm/test/CodeGen/AArch64/framelayout-sve.mir +++ b/llvm/test/CodeGen/AArch64/framelayout-sve.mir @@ -57,9 +57,12 @@ # CHECK: bb.0.entry: # CHECK-NEXT: $sp = 
frame-setup STRXpre killed $[[SCRATCH:[a-z0-9]+]], $sp, -16 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0 -# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2 # CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 16, 0 @@ -67,11 +70,16 @@ # CHECK-NEXT: RET_ReallyLR # ASM-LABEL: test_allocate_sve: -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 16 * VG +# ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 +# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG +# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 16 * VG # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus + name: test_allocate_sve stack: - { id: 0, stack-id: scalable-vector, size: 18, alignment: 2 } @@ -95,10 +103,13 @@ # CHECK: bb.0.entry: # CHECK-NEXT: $sp = frame-setup STRXpre killed $[[SCRATCH:[a-z0-9]+]], $sp, -32 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: frame-setup STPXi killed $x21, killed $x20, $sp, 2 +# CHECK-COUNT-3: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION 
# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0 -# CHECK-COUNT-4: frame-setup CFI_INSTRUCTION +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # # CHECK-NEXT: $x20 = IMPLICIT_DEF # CHECK-NEXT: $x21 = IMPLICIT_DEF @@ -109,15 +120,19 @@ # CHECK-NEXT: RET_ReallyLR # # ASM-LABEL: test_allocate_sve_gpr_callee_saves: -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 16 * VG -# ASM-NEXT: .cfi_offset w20, -8 +# ASM: .cfi_def_cfa_offset 32 +# ASM: .cfi_offset w20, -8 # ASM-NEXT: .cfi_offset w21, -16 # ASM-NEXT: .cfi_offset w29, -32 +# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 16 * VG +# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 16 * VG # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_offset: reg20 -8 +# UNWINDINFO: DW_CFA_def_cfa_offset: +32 +# UNWINDINFO: DW_CFA_offset: reg20 -8 # UNWINDINFO-NEXT: DW_CFA_offset: reg21 -16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -32 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus name: test_allocate_sve_gpr_callee_saves stack: - { id: 0, stack-id: scalable-vector, size: 18, alignment: 2 } @@ -142,20 +157,23 @@ # CHECK: bb.0.entry: # CHECK-NEXT: $sp = frame-setup STPXpre killed $fp, killed $lr, $sp, -2 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $fp = frame-setup ADDXri $sp, 0, 0 +# CHECK-COUNT-3: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2 # CHECK-NEXT: $[[TMP:x[0-9]+]] = frame-setup SUBXri $sp, 16, 0 # CHECK-NEXT: $sp 
= ANDXri killed $[[TMP]] -# CHECK-COUNT-3: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-destroy ADDXri $fp, 0, 0 # CHECK-NEXT: $sp, $fp, $lr = frame-destroy LDPXpost $sp, 2 # CHECK-NEXT: RET_ReallyLR # # ASM-LABEL: test_allocate_sve_gpr_realigned: +# ASM: .cfi_def_cfa_offset 16 # ASM: .cfi_def_cfa w29, 16 # ASM-NEXT: .cfi_offset w30, -8 # ASM-NEXT: .cfi_offset w29, -16 # +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO: DW_CFA_def_cfa: reg29 +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 @@ -184,9 +202,11 @@ # CHECK: bb.0.entry: # CHECK-NEXT: $sp = frame-setup STRXpre killed $[[SCRATCH:[a-z0-9]+]], $sp, -16 +# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0 -# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 16 # CHECK-NEXT: STR_ZXI $z0, killed $[[TMP]], 2 @@ -201,11 +221,15 @@ # CHECK-NEXT: RET_ReallyLR # # ASM-LABEL: test_address_sve: -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 24 * VG +# ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 +# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 24 * VG # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: 
DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus name: test_address_sve frameInfo: @@ -242,10 +266,11 @@ # CHECK: bb.0.entry: # CHECK-NEXT: $sp = frame-setup STPXpre killed $fp, killed $lr, $sp, -2 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $fp = frame-setup ADDXri $sp, 0, 0 +# CHECK-COUNT-3: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3 # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0 -# CHECK-COUNT-3: frame-setup CFI_INSTRUCTION # CHECK-NEXT: STR_ZXI $z0, $fp, -1 # CHECK-NEXT: STR_ZXI $z1, $fp, -2 @@ -257,10 +282,12 @@ # CHECK-NEXT: RET_ReallyLR # # ASM-LABEL: test_address_sve_fp: +# ASM: .cfi_def_cfa_offset 16 # ASM: .cfi_def_cfa w29, 16 # ASM-NEXT: .cfi_offset w30, -8 # ASM-NEXT: .cfi_offset w29, -16 # +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO: DW_CFA_def_cfa: reg29 +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 @@ -298,9 +325,10 @@ # CHECK: bb.0.entry: # CHECK-NEXT: $sp = frame-setup STRXpre killed $[[SCRATCH:[a-z0-9]+]], $sp, -16 +# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0 -# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION # CHECK: $[[TMP:x[0-9]+]] = ADDVL_XXI $sp, 1 # CHECK-NEXT: $x0 = LDRXui killed $[[TMP]], 4 @@ -311,11 +339,15 @@ # CHECK-NEXT: RET_ReallyLR # # ASM-LABEL: test_stack_arg_sve: -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG +# ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 +# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG # -# UNWINDINFO: DW_CFA_def_cfa_expression: 
DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus name: test_stack_arg_sve fixedStack: @@ -348,16 +380,25 @@ # CHECK: bb.0.entry: # CHECK-NEXT: $sp = frame-setup STRXpre killed $[[SCRATCH:[a-z0-9]+]], $sp, -16 +# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1 -# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $[[TMP2:x[0-9]+]] = ADDVL_XXI $sp, 1 # CHECK-NEXT: STR_ZXI $z0, killed $[[TMP2]], 255 @@ -378,11 +419,29 @@ # CHECK-NEXT: RET_ReallyLR # # ASM-LABEL: test_address_sve_out_of_range: -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2056 * VG +# ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 +# ASM: .cfi_escape 
0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 256 * VG +# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 512 * VG +# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 768 * VG +# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1024 * VG +# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1280 * VG +# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1536 * VG +# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1792 * VG +# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2048 * VG +# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2056 * VG # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +2056, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +256, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +512, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +768, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1024, DW_OP_bregx 0x2e +0, DW_OP_mul, 
DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1280, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1536, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1792, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +2048, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +2056, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus name: test_address_sve_out_of_range frameInfo: maxAlignment: 16 @@ -419,11 +478,13 @@ # CHECK: RET_ReallyLR # # ASM-LABEL: test_address_gpr_vla: +# ASM: .cfi_def_cfa_offset 32 # ASM: .cfi_def_cfa w29, 32 # ASM-NEXT: .cfi_offset w19, -16 # ASM-NEXT: .cfi_offset w30, -24 # ASM-NEXT: .cfi_offset w29, -32 # +# UNWINDINFO: DW_CFA_def_cfa_offset: +32 # UNWINDINFO: DW_CFA_def_cfa: reg29 +32 # UNWINDINFO-NEXT: DW_CFA_offset: reg19 -16 # UNWINDINFO-NEXT: DW_CFA_offset: reg30 -24 @@ -445,12 +506,14 @@ --- ... 
# CHECK-LABEL: name: save_restore_pregs_sve +# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION # CHECK: $sp = frame-setup ADDVL_XXI $sp, -1 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK: frame-setup STR_PXI killed $p6, $sp, 5 # CHECK: frame-setup STR_PXI killed $p5, $sp, 6 # CHECK: frame-setup STR_PXI killed $p4, $sp, 7 # CHECK: $sp = frame-setup SUBXri $sp, 32, 0 -# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK: $sp = frame-destroy ADDXri $sp, 32, 0 # CHECK: $p6 = frame-destroy LDR_PXI $sp, 5 @@ -460,11 +523,15 @@ # CHECK: RET_ReallyLR # # ASM-LABEL: save_restore_pregs_sve: -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 8 * VG +# ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 +# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG +# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 8 * VG # +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 +# UNWINDINFO: DW_CFA_offset: reg29 -16 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus # UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 name: save_restore_pregs_sve stack: - { id: 0, stack-id: default, size: 32, alignment: 16 } @@ -480,12 +547,15 @@ ... 
# CHECK-LABEL: name: save_restore_zregs_sve # CHECK: $sp = frame-setup STRXpre killed $fp, $sp, -16 +# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: frame-setup STR_ZXI killed $z10, $sp, 0 # CHECK-NEXT: frame-setup STR_ZXI killed $z9, $sp, 1 # CHECK-NEXT: frame-setup STR_ZXI killed $z8, $sp, 2 +# CHECK-COUNT-3: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup SUBXri $sp, 32, 0 -# CHECK-COUNT-5: frame-setup CFI_INSTRUCTION +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK: $sp = frame-destroy ADDXri $sp, 32, 0 # CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0 @@ -496,16 +566,21 @@ # CHECK-NEXT: RET_ReallyLR # # ASM-LABEL: save_restore_zregs_sve: -# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 24 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +# ASM: .cfi_def_cfa_offset 16 +# ASM-NEXT: .cfi_offset w29, -16 +# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG # ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG # ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG +# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 24 * VG -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, 
DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 +# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus # UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus # UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus name: save_restore_zregs_sve stack: @@ -528,8 +603,11 @@ # # CHECK-LABEL: name: save_restore_sve # CHECK: $sp = frame-setup STPXpre killed ${{[a-z0-9]+}}, killed $x21, $sp, -4 +# CHECK: frame-setup CFI_INSTRUCTION # CHECK: frame-setup STPXi killed $x20, killed $x19, $sp, 2 +# CHECK-COUNT-4: frame-setup CFI_INSTRUCTION # CHECK: $sp = frame-setup ADDVL_XXI $sp, -18 +# CHECK: frame-setup CFI_INSTRUCTION # CHECK: frame-setup STR_PXI killed $p15, $sp, 4 # CHECK: frame-setup STR_PXI killed $p14, $sp, 5 # CHECK: frame-setup STR_PXI killed $p5, $sp, 14 @@ -538,9 +616,11 @@ # CHECK: frame-setup STR_ZXI killed $z22, $sp, 3 # CHECK: frame-setup STR_ZXI killed $z9, $sp, 16 # CHECK: frame-setup STR_ZXI killed $z8, $sp, 17 +# CHECK-COUNT-8: frame-setup CFI_INSTRUCTION # CHECK: $sp = frame-setup ADDVL_XXI $sp, -1 +# CHECK: frame-setup CFI_INSTRUCTION # CHECK: $sp = frame-setup SUBXri $sp, 32, 0 -# CHECK-COUNT-13: frame-setup CFI_INSTRUCTION +# CHECK: frame-setup CFI_INSTRUCTION # CHECK: $sp = frame-destroy ADDXri $sp, 32, 0 # CHECK: $sp = frame-destroy ADDVL_XXI $sp, 1 @@ -558,8 +638,13 @@ # CHECK: RET_ReallyLR # # ASM-LABEL: save_restore_sve: -# ASM: 
.cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 64 + 152 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG +# ASM: .cfi_def_cfa_offset 32 +# ASM: .cfi_offset w19, -8 +# ASM-NEXT: .cfi_offset w20, -16 +# ASM-NEXT: .cfi_offset w21, -24 +# ASM-NEXT: .cfi_offset w29, -32 +# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG +# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG # ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG # ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG # ASM-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 32 - 32 * VG @@ -567,13 +652,16 @@ # ASM-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 32 - 48 * VG # ASM-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 32 - 56 * VG # ASM-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 32 - 64 * VG -# ASM-NEXT: .cfi_offset w19, -8 -# ASM-NEXT: .cfi_offset w20, -16 -# ASM-NEXT: .cfi_offset w21, -24 -# ASM-NEXT: .cfi_offset w29, -32 +# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 152 * VG +# ASM: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 64 + 152 * VG # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +64, DW_OP_plus, DW_OP_consts +152, DW_OP_bregx 
0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg72 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_offset: +32 +# UNWINDINFO: DW_CFA_offset: reg19 -8 +# UNWINDINFO-NEXT: DW_CFA_offset: reg20 -16 +# UNWINDINFO-NEXT: DW_CFA_offset: reg21 -24 +# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -32 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +144, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus # UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus # UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus # UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -32, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus @@ -581,10 +669,8 @@ # UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -48, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus # UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -56, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus # UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -64, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_offset: reg19 -8 -# UNWINDINFO-NEXT: DW_CFA_offset: reg20 -16 -# UNWINDINFO-NEXT: DW_CFA_offset: reg21 -24 -# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -32 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +152, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +64, DW_OP_plus, DW_OP_consts +152, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus name: save_restore_sve 
stack: @@ -625,7 +711,9 @@ # # CHECK-LABEL: name: save_restore_sve_realign # CHECK: $sp = frame-setup STPXpre killed $fp, killed $lr, $sp, -2 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $fp = frame-setup ADDXri $sp, 0, 0 +# CHECK-COUNT-3: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -18 # CHECK-NEXT: STR_PXI killed $p15, $sp, 4 # CHECK-NEXT: STR_PXI killed $p14, $sp, 5 @@ -635,10 +723,10 @@ # CHECK-NEXT: STR_ZXI killed $z22, $sp, 3 # CHECK: STR_ZXI killed $z9, $sp, 16 # CHECK-NEXT: STR_ZXI killed $z8, $sp, 17 +# CHECK-COUNT-8: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1 # CHECK-NEXT: $[[TMP:x[0-9]+]] = frame-setup SUBXri $sp, 16, 0 # CHECK-NEXT: $sp = ANDXri killed $[[TMP]] -# CHECK-COUNT-11: frame-setup CFI_INSTRUCTION # CHECK: $sp = frame-destroy ADDVL_XXI $fp, -18 # CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 4 @@ -654,8 +742,11 @@ # CHECK-NEXT: RET_ReallyLR # # ASM-LABEL: save_restore_sve_realign: +# ASM: .cfi_def_cfa_offset 16 # ASM: .cfi_def_cfa w29, 16 -# ASM-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +# ASM-NEXT: .cfi_offset w30, -8 +# ASM-NEXT: .cfi_offset w29, -16 +# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG # ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG # ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG # ASM-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG @@ -663,11 +754,12 @@ # ASM-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG # ASM-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 
0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG # ASM-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG -# ASM-NEXT: .cfi_offset w30, -8 -# ASM-NEXT: .cfi_offset w29, -16 # +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO: DW_CFA_def_cfa: reg29 +16 -# UNWINDINFO-NEXT: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8 +# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus # UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus # UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus # UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -32, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus @@ -675,8 +767,6 @@ # UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -48, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus # UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -56, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus # UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -64, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8 -# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 name: save_restore_sve_realign stack: - { id: 0, stack-id: scalable-vector, size: 16, alignment: 16 } @@ -745,21 +835,29 @@ # # CHECK: bb.0.entry: # CHECK-NEXT: $sp = frame-setup STRXpre killed $[[SCRATCH:[a-z0-9]+]], $sp, -16 +# CHECK-COUNT-2: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: STR_PXI killed 
$p15, $sp, 6 # CHECK-NEXT: STR_PXI killed $p4, $sp, 7 # CHECK-NEXT: STR_ZXI killed $z23, $sp, 1 # CHECK-NEXT: STR_ZXI killed $z8, $sp, 2 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION # CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -7 -# CHECK-COUNT-3: frame-setup CFI_INSTRUCTION +# CHECK-NEXT: frame-setup CFI_INSTRUCTION +# # ASM-LABEL: frame_layout: -# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 80 * VG -# ASM-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +# ASM: .cfi_def_cfa_offset 16 # ASM-NEXT: .cfi_offset w29, -16 +# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG +# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG +# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 80 * VG # -# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +80, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus -# UNWINDINFO-NEXT: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 # UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +80, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus name: frame_layout stack: - { id: 0, type: default, size: 32, alignment: 16, stack-id: scalable-vector } diff --git 
a/llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll b/llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll --- a/llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll +++ b/llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll @@ -27,6 +27,7 @@ ; CHECK-LABEL: b: ; CHECK: str d8, [sp, #-32]! +; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: stp x29, x30, [sp, #8] ; CHECK-NEXT: str x19, [sp, #24] ; CHECK-NEXT: add x29, sp, #8 diff --git a/llvm/test/CodeGen/AArch64/i128-math.ll b/llvm/test/CodeGen/AArch64/i128-math.ll --- a/llvm/test/CodeGen/AArch64/i128-math.ll +++ b/llvm/test/CodeGen/AArch64/i128-math.ll @@ -462,8 +462,8 @@ ; CHECK-LABEL: i128_saturating_mul: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 diff --git a/llvm/test/CodeGen/AArch64/isinf.ll b/llvm/test/CodeGen/AArch64/isinf.ll --- a/llvm/test/CodeGen/AArch64/isinf.ll +++ b/llvm/test/CodeGen/AArch64/isinf.ll @@ -59,8 +59,8 @@ ; CHECK-LABEL: replace_isinf_call_f128: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: str q0, [sp] ; CHECK-NEXT: ldrb w8, [sp, #15] diff --git a/llvm/test/CodeGen/AArch64/large-stack-cmp.ll b/llvm/test/CodeGen/AArch64/large-stack-cmp.ll --- a/llvm/test/CodeGen/AArch64/large-stack-cmp.ll +++ b/llvm/test/CodeGen/AArch64/large-stack-cmp.ll @@ -5,14 +5,16 @@ ; CHECK-LABEL: foo: ; CHECK: ; %bb.0: ; CHECK-NEXT: stp x28, x27, [sp, #-32]! 
; 16-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill -; CHECK-NEXT: sub sp, sp, #1, lsl #12 ; =4096 -; CHECK-NEXT: sub sp, sp, #80 -; CHECK-NEXT: .cfi_def_cfa_offset 4208 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: .cfi_offset w27, -24 ; CHECK-NEXT: .cfi_offset w28, -32 +; CHECK-NEXT: sub sp, sp, #1, lsl #12 ; =4096 +; CHECK-NEXT: .cfi_def_cfa_offset 4128 +; CHECK-NEXT: sub sp, sp, #80 +; CHECK-NEXT: .cfi_def_cfa_offset 4208 ; CHECK-NEXT: adds x8, sp, #1, lsl #12 ; =4096 ; CHECK-NEXT: cmn x8, #32 ; CHECK-NEXT: b.eq LBB0_2 diff --git a/llvm/test/CodeGen/AArch64/large-stack.ll b/llvm/test/CodeGen/AArch64/large-stack.ll --- a/llvm/test/CodeGen/AArch64/large-stack.ll +++ b/llvm/test/CodeGen/AArch64/large-stack.ll @@ -24,8 +24,10 @@ attributes #0 = { noinline optnone "frame-pointer"="all" } ; CHECK: stp x[[SPILL_REG1:[0-9]+]], x[[SPILL_REG2:[0-9]+]], [sp, #-[[SPILL_OFFSET1:[0-9]+]]] +; CHECK-NEXT: .cfi_def_cfa_offset [[SPILL_OFFSET1]] ; CHECK-NEXT: str x[[SPILL_REG3:[0-9]+]], [sp, #[[SPILL_OFFSET2:[0-9]+]]] ; CHECK-NEXT: mov x[[FRAME:[0-9]+]], sp +; CHECK-NEXT: .cfi_def_cfa w[[FRAME]], [[SPILL_OFFSET1]] ; CHECK-COUNT-128: sub sp, sp, #[[STACK1:[0-9]+]], lsl #12 ; CHECK-NEXT: sub sp, sp, #[[STACK2:[0-9]+]], lsl #12 ; CHECK-NEXT: sub sp, sp, #[[STACK3:[0-9]+]] diff --git a/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll b/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll --- a/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll +++ b/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll @@ -13,8 +13,8 @@ ; CHECK-LABEL: main: ; CHECK: // %bb.0: // %for.body.lr.ph.i.i.i.i.i.i63 ; CHECK-NEXT: sub sp, sp, #112 -; CHECK-NEXT: str x30, [sp, #96] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 112 +; CHECK-NEXT: str x30, [sp, #96] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl _Z5setupv ; CHECK-NEXT: movi v0.4s, #1 diff --git 
a/llvm/test/CodeGen/AArch64/local_vars.ll b/llvm/test/CodeGen/AArch64/local_vars.ll --- a/llvm/test/CodeGen/AArch64/local_vars.ll +++ b/llvm/test/CodeGen/AArch64/local_vars.ll @@ -30,7 +30,11 @@ ; CHECK-WITHFP-ARM64-LABEL: trivial_fp_func: ; CHECK-WITHFP-ARM64: stp x29, x30, [sp, #-16]! +; CHECK-WITHFP-ARM64-NEXT: .cfi_def_cfa_offset 16 ; CHECK-WITHFP-ARM64-NEXT: mov x29, sp +; CHECK-WITHFP-ARM64-NEXT: .cfi_def_cfa w29, 16 +; CHECK-WITHFP-ARM64-NEXT: .cfi_offset w30, -8 +; CHECK-WITHFP-ARM64-NEXT: .cfi_offset w29, -16 ; Dont't really care, but it would be a Bad Thing if this came after the epilogue. ; CHECK-WITHFP-ARM64: bl foo diff --git a/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll b/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll --- a/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll +++ b/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll @@ -9,8 +9,8 @@ ; CHECK-LABEL: sink_load_and_copy: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -63,8 +63,8 @@ ; CHECK-LABEL: cant_sink_successive_call: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 @@ -120,8 +120,8 @@ ; CHECK-LABEL: cant_sink_successive_store: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: stp x30, x21, [sp, #-32]! 
// 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-throw2.ll b/llvm/test/CodeGen/AArch64/machine-outliner-throw2.ll --- a/llvm/test/CodeGen/AArch64/machine-outliner-throw2.ll +++ b/llvm/test/CodeGen/AArch64/machine-outliner-throw2.ll @@ -20,6 +20,7 @@ ; NOOMIT-LABEL: _Z2f1v: ; NOOMIT: // %bb.0: // %entry ; NOOMIT-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill +; NOOMIT-NEXT: .cfi_def_cfa_offset 16 ; NOOMIT-NEXT: mov x29, sp ; NOOMIT-NEXT: .cfi_def_cfa w29, 16 ; NOOMIT-NEXT: .cfi_offset w30, -8 @@ -46,6 +47,7 @@ ; NOOMIT-LABEL: _Z2f2v: ; NOOMIT: // %bb.0: // %entry ; NOOMIT-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill +; NOOMIT-NEXT: .cfi_def_cfa_offset 16 ; NOOMIT-NEXT: mov x29, sp ; NOOMIT-NEXT: .cfi_def_cfa w29, 16 ; NOOMIT-NEXT: .cfi_offset w30, -8 diff --git a/llvm/test/CodeGen/AArch64/neg-imm.ll b/llvm/test/CodeGen/AArch64/neg-imm.ll --- a/llvm/test/CodeGen/AArch64/neg-imm.ll +++ b/llvm/test/CodeGen/AArch64/neg-imm.ll @@ -9,8 +9,8 @@ ; CHECK-LABEL: test: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 diff --git a/llvm/test/CodeGen/AArch64/peephole-and-tst.ll b/llvm/test/CodeGen/AArch64/peephole-and-tst.ll --- a/llvm/test/CodeGen/AArch64/peephole-and-tst.ll +++ b/llvm/test/CodeGen/AArch64/peephole-and-tst.ll @@ -151,8 +151,8 @@ ; CHECK-LABEL: test_and3: ; CHECK: // %bb.0: ; CHECK-NEXT: str x30, [sp, #-32]! 
// 8-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 diff --git a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll --- a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll +++ b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll @@ -9,11 +9,11 @@ ; CHECK-LABEL: run_test: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: sub sp, sp, #96 +; CHECK-NEXT: .cfi_def_cfa_offset 96 ; CHECK-NEXT: stp d15, d14, [sp, #32] // 16-byte Folded Spill ; CHECK-NEXT: stp d13, d12, [sp, #48] // 16-byte Folded Spill ; CHECK-NEXT: stp d11, d10, [sp, #64] // 16-byte Folded Spill ; CHECK-NEXT: stp d9, d8, [sp, #80] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 96 ; CHECK-NEXT: .cfi_offset b8, -8 ; CHECK-NEXT: .cfi_offset b9, -16 ; CHECK-NEXT: .cfi_offset b10, -24 @@ -171,6 +171,7 @@ ; CHECK-NEXT: str q2, [x8, #496] ; CHECK-NEXT: add sp, sp, #96 ; CHECK-NEXT: ret +; CHECK-NEXT: .cfi_offset b9, -16 entry: br label %for.cond1.preheader diff --git a/llvm/test/CodeGen/AArch64/settag.ll b/llvm/test/CodeGen/AArch64/settag.ll --- a/llvm/test/CodeGen/AArch64/settag.ll +++ b/llvm/test/CodeGen/AArch64/settag.ll @@ -148,8 +148,8 @@ ; CHECK-LABEL: stg_alloca17: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: sub sp, sp, #288 -; CHECK-NEXT: str x29, [sp, #272] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 288 +; CHECK-NEXT: str x29, [sp, #272] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, #256 ; CHECK-NEXT: .LBB11_1: // %entry diff --git a/llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll b/llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll --- a/llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll +++ 
b/llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll @@ -78,6 +78,7 @@ ; CHECK-LABEL: f ; CHECK: stp x29, x30, [sp, #-16]! +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: mov x29, sp ; VLA allocation diff --git a/llvm/test/CodeGen/AArch64/sibling-call.ll b/llvm/test/CodeGen/AArch64/sibling-call.ll --- a/llvm/test/CodeGen/AArch64/sibling-call.ll +++ b/llvm/test/CodeGen/AArch64/sibling-call.ll @@ -26,8 +26,8 @@ ; CHECK-LABEL: caller_to8_from0: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: mov w8, #42 ; CHECK-NEXT: str x8, [sp] @@ -58,8 +58,8 @@ ; CHECK-LABEL: caller_to16_from8: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: bl callee_stack16 ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload diff --git a/llvm/test/CodeGen/AArch64/split-vector-insert.ll b/llvm/test/CodeGen/AArch64/split-vector-insert.ll --- a/llvm/test/CodeGen/AArch64/split-vector-insert.ll +++ b/llvm/test/CodeGen/AArch64/split-vector-insert.ll @@ -17,9 +17,10 @@ ; CHECK-LABEL: test_nxv2i64_v8i64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-3 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cntd x8 ; CHECK-NEXT: mov w9, #2 ; CHECK-NEXT: sub x8, x8, #2 @@ -70,9 +71,10 @@ ; CHECK-LABEL: test_nxv2f64_v8f64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-3 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cntd x8 ; CHECK-NEXT: mov w9, #2 ; CHECK-NEXT: sub x8, x8, #2 diff --git a/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll b/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll --- a/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll +++ b/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll @@ -10,6 +10,7 @@ ; CHECK-LABEL: test_stack_guard_remat2: ; CHECK: ; %bb.0: ; %entry ; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: .cfi_def_cfa_offset 64 ; CHECK-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill ; CHECK-NEXT: add x29, sp, #48 ; CHECK-NEXT: .cfi_def_cfa w29, 16 diff --git a/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll b/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll --- a/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll +++ b/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll @@ -42,11 +42,12 @@ ; CHECK-LABEL: foo: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: mov x29, sp -; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: mrs x8, SP_EL0 ; CHECK-NEXT: lsl x9, x0, #2 ; CHECK-NO-OFFSET: ldr x8, [x8] diff --git a/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll b/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll --- a/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll +++ b/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll @@ -161,8 +161,8 @@ ; CHECK-LABEL: test_cross_bb: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x30, [sp, #-32]! 
// 8-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w30, -32 @@ -205,8 +205,8 @@ ; CHECK-LABEL: test_attributes: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: sub sp, sp, #48 -; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 48 +; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: mov x18, xzr ; CHECK-NEXT: ldr q0, [sp, #48] diff --git a/llvm/test/CodeGen/AArch64/sve-alloca.ll b/llvm/test/CodeGen/AArch64/sve-alloca.ll --- a/llvm/test/CodeGen/AArch64/sve-alloca.ll +++ b/llvm/test/CodeGen/AArch64/sve-alloca.ll @@ -9,8 +9,14 @@ ; CHECK-NEXT: tbz w0, #0, .LBB0_2 ; CHECK-NEXT: // %bb.1: // %if.then ; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: stp x28, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: .cfi_def_cfa w29, 32 +; CHECK-NEXT: .cfi_offset w19, -8 +; CHECK-NEXT: .cfi_offset w28, -16 +; CHECK-NEXT: .cfi_offset w30, -24 +; CHECK-NEXT: .cfi_offset w29, -32 ; CHECK-NEXT: addvl sp, sp, #-18 ; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill @@ -40,8 +46,6 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: mov x19, sp -; CHECK-NEXT: .cfi_def_cfa w29, 32 ; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG ; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG ; CHECK-NEXT: 
.cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG @@ -50,13 +54,10 @@ ; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 32 - 48 * VG ; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 32 - 56 * VG ; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 32 - 64 * VG -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w28, -16 -; CHECK-NEXT: .cfi_offset w30, -24 -; CHECK-NEXT: .cfi_offset w29, -32 ; CHECK-NEXT: rdvl x9, #2 ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: add x9, x9, #15 +; CHECK-NEXT: mov x19, sp ; CHECK-NEXT: and x9, x9, #0xfffffffffffffff0 ; CHECK-NEXT: sub x8, x8, x9 ; CHECK-NEXT: and x0, x8, #0xffffffffffffffe0 diff --git a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll --- a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll @@ -60,17 +60,18 @@ ; CHECK-LABEL: extract_nxv14i1_nxv28i1_14: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: punpkhi p2.h, p1.b ; CHECK-NEXT: punpklo p1.h, p1.b ; CHECK-NEXT: punpklo p2.h, p2.b +; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: punpkhi p3.h, p1.b ; CHECK-NEXT: punpkhi p4.h, p2.b +; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: punpklo p1.h, p1.b ; CHECK-NEXT: punpklo p2.h, p2.b ; CHECK-NEXT: punpkhi p5.h, p3.b diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll @@ -48,12 +48,13 @@ ; CHECK-LABEL: select_v32i8: ; CHECK: // %bb.0: ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill -; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: mov x29, sp -; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: ldr w8, [x2] ; CHECK-NEXT: ptrue p0.b, vl32 ; CHECK-NEXT: ptrue p1.b @@ -143,12 +144,13 @@ ; VBITS_GE_512-LABEL: select_v64i8: ; VBITS_GE_512: // %bb.0: ; VBITS_GE_512-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill -; VBITS_GE_512-NEXT: sub x9, sp, #112 +; VBITS_GE_512-NEXT: .cfi_def_cfa_offset 16 ; VBITS_GE_512-NEXT: mov x29, sp -; VBITS_GE_512-NEXT: and sp, x9, #0xffffffffffffffc0 ; VBITS_GE_512-NEXT: .cfi_def_cfa w29, 16 ; VBITS_GE_512-NEXT: .cfi_offset w30, -8 ; VBITS_GE_512-NEXT: .cfi_offset w29, -16 +; VBITS_GE_512-NEXT: sub x9, sp, #112 +; VBITS_GE_512-NEXT: and sp, x9, #0xffffffffffffffc0 ; VBITS_GE_512-NEXT: ldr x8, [x2] ; VBITS_GE_512-NEXT: ptrue p0.b, vl64 ; VBITS_GE_512-NEXT: ptrue p1.b @@ -302,12 +304,13 @@ ; VBITS_GE_1024-LABEL: select_v128i8: ; VBITS_GE_1024: // %bb.0: ; VBITS_GE_1024-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill -; VBITS_GE_1024-NEXT: sub x9, sp, #240 +; VBITS_GE_1024-NEXT: .cfi_def_cfa_offset 16 ; VBITS_GE_1024-NEXT: mov x29, sp -; VBITS_GE_1024-NEXT: and sp, x9, #0xffffffffffffff80 ; VBITS_GE_1024-NEXT: .cfi_def_cfa w29, 16 ; VBITS_GE_1024-NEXT: .cfi_offset w30, -8 ; VBITS_GE_1024-NEXT: .cfi_offset w29, -16 +; VBITS_GE_1024-NEXT: sub x9, sp, #240 +; VBITS_GE_1024-NEXT: and sp, x9, #0xffffffffffffff80 ; VBITS_GE_1024-NEXT: ldr x8, [x2, #8] ; VBITS_GE_1024-NEXT: ptrue p0.b, vl128 ; VBITS_GE_1024-NEXT: ptrue p1.b @@ -590,12 +593,13 @@ ; VBITS_GE_2048-LABEL: select_v256i8: ; VBITS_GE_2048: // %bb.0: ; VBITS_GE_2048-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill -; VBITS_GE_2048-NEXT: sub x9, sp, #496 +; VBITS_GE_2048-NEXT: .cfi_def_cfa_offset 16 ; VBITS_GE_2048-NEXT: mov x29, sp -; VBITS_GE_2048-NEXT: and sp, x9, #0xffffffffffffff00 ; VBITS_GE_2048-NEXT: .cfi_def_cfa w29, 16 ; VBITS_GE_2048-NEXT: .cfi_offset w30, -8 ; VBITS_GE_2048-NEXT: .cfi_offset w29, -16 +; VBITS_GE_2048-NEXT: sub x9, sp, #496 +; VBITS_GE_2048-NEXT: and sp, x9, #0xffffffffffffff00 ; VBITS_GE_2048-NEXT: ldr x8, [x2, #24] ; VBITS_GE_2048-NEXT: ptrue p0.b, vl256 ; VBITS_GE_2048-NEXT: ptrue p1.b @@ -1161,12 +1165,13 @@ ; CHECK-LABEL: select_v16i16: ; CHECK: // %bb.0: ; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill -; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: mov x29, sp -; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: ldrh w8, [x2] ; CHECK-NEXT: ptrue p0.h, vl16 ; CHECK-NEXT: ptrue p1.h @@ -1224,12 +1229,13 @@ ; VBITS_GE_512-LABEL: select_v32i16: ; VBITS_GE_512: // %bb.0: ; VBITS_GE_512-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill -; VBITS_GE_512-NEXT: sub x9, sp, #112 +; VBITS_GE_512-NEXT: .cfi_def_cfa_offset 16 ; VBITS_GE_512-NEXT: mov x29, sp -; VBITS_GE_512-NEXT: and sp, x9, #0xffffffffffffffc0 ; VBITS_GE_512-NEXT: .cfi_def_cfa w29, 16 ; VBITS_GE_512-NEXT: .cfi_offset w30, -8 ; VBITS_GE_512-NEXT: .cfi_offset w29, -16 +; VBITS_GE_512-NEXT: sub x9, sp, #112 +; VBITS_GE_512-NEXT: and sp, x9, #0xffffffffffffffc0 ; VBITS_GE_512-NEXT: ldr w8, [x2] ; VBITS_GE_512-NEXT: ptrue p0.h, vl32 ; VBITS_GE_512-NEXT: ptrue p1.h @@ -1319,12 +1325,13 @@ ; VBITS_GE_1024-LABEL: select_v64i16: ; VBITS_GE_1024: // %bb.0: ; VBITS_GE_1024-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill -; VBITS_GE_1024-NEXT: sub x9, sp, #240 +; VBITS_GE_1024-NEXT: .cfi_def_cfa_offset 16 ; VBITS_GE_1024-NEXT: mov x29, sp -; VBITS_GE_1024-NEXT: and sp, x9, #0xffffffffffffff80 ; VBITS_GE_1024-NEXT: .cfi_def_cfa w29, 16 ; VBITS_GE_1024-NEXT: .cfi_offset w30, -8 ; VBITS_GE_1024-NEXT: .cfi_offset w29, -16 +; VBITS_GE_1024-NEXT: sub x9, sp, #240 +; VBITS_GE_1024-NEXT: and sp, x9, #0xffffffffffffff80 ; VBITS_GE_1024-NEXT: ldr x8, [x2] ; VBITS_GE_1024-NEXT: ptrue p0.h, vl64 ; VBITS_GE_1024-NEXT: ptrue p1.h @@ -1478,12 +1485,13 @@ ; VBITS_GE_2048-LABEL: select_v128i16: ; VBITS_GE_2048: // %bb.0: ; VBITS_GE_2048-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill -; VBITS_GE_2048-NEXT: sub x9, sp, #496 +; VBITS_GE_2048-NEXT: .cfi_def_cfa_offset 16 ; VBITS_GE_2048-NEXT: mov x29, sp -; VBITS_GE_2048-NEXT: and sp, x9, #0xffffffffffffff00 ; VBITS_GE_2048-NEXT: .cfi_def_cfa w29, 16 ; VBITS_GE_2048-NEXT: .cfi_offset w30, -8 ; VBITS_GE_2048-NEXT: .cfi_offset w29, -16 +; VBITS_GE_2048-NEXT: sub x9, sp, #496 +; VBITS_GE_2048-NEXT: and sp, x9, #0xffffffffffffff00 ; VBITS_GE_2048-NEXT: ldr x8, [x2, #8] ; VBITS_GE_2048-NEXT: ptrue p0.h, vl128 ; VBITS_GE_2048-NEXT: ptrue p1.h @@ -1791,12 +1799,13 @@ ; CHECK-LABEL: select_v8i32: ; CHECK: // %bb.0: ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill -; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: mov x29, sp -; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: ldrb w8, [x2] ; CHECK-NEXT: ptrue p0.s, vl8 ; CHECK-NEXT: ptrue p1.s @@ -1834,12 +1843,13 @@ ; VBITS_GE_512-LABEL: select_v16i32: ; VBITS_GE_512: // %bb.0: ; VBITS_GE_512-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill -; VBITS_GE_512-NEXT: sub x9, sp, #112 +; VBITS_GE_512-NEXT: .cfi_def_cfa_offset 16 ; VBITS_GE_512-NEXT: mov x29, sp -; VBITS_GE_512-NEXT: and sp, x9, #0xffffffffffffffc0 ; VBITS_GE_512-NEXT: .cfi_def_cfa w29, 16 ; VBITS_GE_512-NEXT: .cfi_offset w30, -8 ; VBITS_GE_512-NEXT: .cfi_offset w29, -16 +; VBITS_GE_512-NEXT: sub x9, sp, #112 +; VBITS_GE_512-NEXT: and sp, x9, #0xffffffffffffffc0 ; VBITS_GE_512-NEXT: ldrh w8, [x2] ; VBITS_GE_512-NEXT: ptrue p0.s, vl16 ; VBITS_GE_512-NEXT: ptrue p1.s @@ -1889,12 +1899,13 @@ ; VBITS_GE_1024-LABEL: select_v32i32: ; VBITS_GE_1024: // %bb.0: ; VBITS_GE_1024-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill -; VBITS_GE_1024-NEXT: sub x9, sp, #240 +; VBITS_GE_1024-NEXT: .cfi_def_cfa_offset 16 ; VBITS_GE_1024-NEXT: mov x29, sp -; VBITS_GE_1024-NEXT: and sp, x9, #0xffffffffffffff80 ; VBITS_GE_1024-NEXT: .cfi_def_cfa w29, 16 ; VBITS_GE_1024-NEXT: .cfi_offset w30, -8 ; VBITS_GE_1024-NEXT: .cfi_offset w29, -16 +; VBITS_GE_1024-NEXT: sub x9, sp, #240 +; VBITS_GE_1024-NEXT: and sp, x9, #0xffffffffffffff80 ; VBITS_GE_1024-NEXT: ldr w8, [x2] ; VBITS_GE_1024-NEXT: ptrue p0.s, vl32 ; VBITS_GE_1024-NEXT: ptrue p1.s @@ -1968,12 +1979,13 @@ ; VBITS_GE_2048-LABEL: select_v64i32: ; VBITS_GE_2048: // %bb.0: ; VBITS_GE_2048-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill -; VBITS_GE_2048-NEXT: sub x9, sp, #496 +; VBITS_GE_2048-NEXT: .cfi_def_cfa_offset 16 ; VBITS_GE_2048-NEXT: mov x29, sp -; VBITS_GE_2048-NEXT: and sp, x9, #0xffffffffffffff00 ; VBITS_GE_2048-NEXT: .cfi_def_cfa w29, 16 ; VBITS_GE_2048-NEXT: .cfi_offset w30, -8 ; VBITS_GE_2048-NEXT: .cfi_offset w29, -16 +; VBITS_GE_2048-NEXT: sub x9, sp, #496 +; VBITS_GE_2048-NEXT: and sp, x9, #0xffffffffffffff00 ; VBITS_GE_2048-NEXT: ldr x8, [x2] ; VBITS_GE_2048-NEXT: ptrue p0.s, vl64 ; VBITS_GE_2048-NEXT: ptrue p1.s @@ -2121,12 +2133,13 @@ ; CHECK-LABEL: select_v4i64: ; CHECK: // %bb.0: ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill -; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: mov x29, sp -; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: ldrb w8, [x2] ; CHECK-NEXT: ptrue p0.d, vl4 ; CHECK-NEXT: ptrue p1.d @@ -2161,12 +2174,13 @@ ; VBITS_GE_512-LABEL: select_v8i64: ; VBITS_GE_512: // %bb.0: ; VBITS_GE_512-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill -; VBITS_GE_512-NEXT: sub x9, sp, #112 +; VBITS_GE_512-NEXT: .cfi_def_cfa_offset 16 ; VBITS_GE_512-NEXT: mov x29, sp -; VBITS_GE_512-NEXT: and sp, x9, #0xffffffffffffffc0 ; VBITS_GE_512-NEXT: .cfi_def_cfa w29, 16 ; VBITS_GE_512-NEXT: .cfi_offset w30, -8 ; VBITS_GE_512-NEXT: .cfi_offset w29, -16 +; VBITS_GE_512-NEXT: sub x9, sp, #112 +; VBITS_GE_512-NEXT: and sp, x9, #0xffffffffffffffc0 ; VBITS_GE_512-NEXT: ldrb w8, [x2] ; VBITS_GE_512-NEXT: ptrue p0.d, vl8 ; VBITS_GE_512-NEXT: ptrue p1.d @@ -2211,12 +2225,13 @@ ; VBITS_GE_1024-LABEL: select_v16i64: ; VBITS_GE_1024: // %bb.0: ; VBITS_GE_1024-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill -; VBITS_GE_1024-NEXT: sub x9, sp, #240 +; VBITS_GE_1024-NEXT: .cfi_def_cfa_offset 16 ; VBITS_GE_1024-NEXT: mov x29, sp -; VBITS_GE_1024-NEXT: and sp, x9, #0xffffffffffffff80 ; VBITS_GE_1024-NEXT: .cfi_def_cfa w29, 16 ; VBITS_GE_1024-NEXT: .cfi_offset w30, -8 ; VBITS_GE_1024-NEXT: .cfi_offset w29, -16 +; VBITS_GE_1024-NEXT: sub x9, sp, #240 +; VBITS_GE_1024-NEXT: and sp, x9, #0xffffffffffffff80 ; VBITS_GE_1024-NEXT: ldrh w8, [x2] ; VBITS_GE_1024-NEXT: ptrue p0.d, vl16 ; VBITS_GE_1024-NEXT: ptrue p1.d @@ -2281,12 +2296,13 @@ ; VBITS_GE_2048-LABEL: select_v32i64: ; VBITS_GE_2048: // %bb.0: ; VBITS_GE_2048-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill -; VBITS_GE_2048-NEXT: sub x9, sp, #496 +; VBITS_GE_2048-NEXT: .cfi_def_cfa_offset 16 ; VBITS_GE_2048-NEXT: mov x29, sp -; VBITS_GE_2048-NEXT: and sp, x9, #0xffffffffffffff00 ; VBITS_GE_2048-NEXT: .cfi_def_cfa w29, 16 ; VBITS_GE_2048-NEXT: .cfi_offset w30, -8 ; VBITS_GE_2048-NEXT: .cfi_offset w29, -16 +; VBITS_GE_2048-NEXT: sub x9, sp, #496 +; VBITS_GE_2048-NEXT: and sp, x9, #0xffffffffffffff00 ; VBITS_GE_2048-NEXT: ldr w8, [x2] ; VBITS_GE_2048-NEXT: ptrue p0.d, vl32 ; VBITS_GE_2048-NEXT: ptrue p1.d diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-rev.ll @@ -195,12 +195,13 @@ ; CHECK-LABEL: test_rev_elts_fail: ; CHECK: // %bb.0: ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill -; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: mov x29, sp -; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] ; CHECK-NEXT: mov z1.d, z0.d[2] @@ -231,12 +232,13 @@ ; CHECK-LABEL: test_revv8i32: ; CHECK: // %bb.0: ; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill -; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: mov x29, sp -; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: ptrue p0.s, vl8 ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] ; CHECK-NEXT: mov w8, v0.s[1] @@ -347,12 +349,13 @@ ; CHECK-LABEL: test_rev_fail: ; CHECK: // %bb.0: ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill -; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: mov x29, sp -; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] ; CHECK-NEXT: mov z1.h, z0.h[8] @@ -411,12 +414,13 @@ ; CHECK-LABEL: test_revv8i16v8i16: ; CHECK: // %bb.0: ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill -; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: mov x29, sp -; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: ldr q0, [x1] ; CHECK-NEXT: orr x9, x8, #0x1e diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll @@ -132,12 +132,13 @@ ; VBITS_EQ_512-LABEL: zip_v4f64: ; VBITS_EQ_512: // %bb.0: ; VBITS_EQ_512-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill -; VBITS_EQ_512-NEXT: sub x9, sp, #48 +; VBITS_EQ_512-NEXT: .cfi_def_cfa_offset 16 ; VBITS_EQ_512-NEXT: mov x29, sp -; VBITS_EQ_512-NEXT: and sp, x9, #0xffffffffffffffe0 ; VBITS_EQ_512-NEXT: .cfi_def_cfa w29, 16 ; VBITS_EQ_512-NEXT: .cfi_offset w30, -8 ; VBITS_EQ_512-NEXT: .cfi_offset w29, -16 +; VBITS_EQ_512-NEXT: sub x9, sp, #48 +; VBITS_EQ_512-NEXT: and sp, x9, #0xffffffffffffffe0 ; VBITS_EQ_512-NEXT: ptrue p0.d, vl4 ; VBITS_EQ_512-NEXT: ld1d { z0.d }, p0/z, [x0] ; VBITS_EQ_512-NEXT: ld1d { z1.d }, p0/z, [x1] @@ -648,12 +649,13 @@ ; CHECK-LABEL: zip_vscale2_4: ; CHECK: // %bb.0: ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill -; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: mov x29, sp -; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: ptrue p0.d, vl4 ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] ; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll @@ -932,12 +932,13 @@ ; CHECK-LABEL: shuffle_ext_invalid: ; CHECK: // %bb.0: ; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill -; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: mov x29, sp -; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 ; CHECK-NEXT: ptrue p0.d, vl4 ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] ; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] diff --git a/llvm/test/CodeGen/AArch64/sve-insert-element.ll b/llvm/test/CodeGen/AArch64/sve-insert-element.ll --- a/llvm/test/CodeGen/AArch64/sve-insert-element.ll +++ b/llvm/test/CodeGen/AArch64/sve-insert-element.ll @@ -500,9 +500,10 @@ ; CHECK-LABEL: test_predicate_insert_32xi1: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, #-1 ; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 ; CHECK-NEXT: sxtw x9, w1 diff --git a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll --- a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll @@ -190,9 +190,10 @@ ; CHECK-LABEL: insert_v2i64_nxv16i64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-4 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 ; CHECK-NEXT: st1d { z0.d }, p0, [sp] @@ -231,9 +232,10 @@ ; CHECK-LABEL: insert_v2i64_nxv16i64_lo2: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: str q0, [sp, #16] diff --git a/llvm/test/CodeGen/AArch64/sve-ldnf1.mir b/llvm/test/CodeGen/AArch64/sve-ldnf1.mir --- a/llvm/test/CodeGen/AArch64/sve-ldnf1.mir +++ b/llvm/test/CodeGen/AArch64/sve-ldnf1.mir @@ -44,9 +44,10 @@ ; CHECK: liveins: $p0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, $sp, 7 :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, $sp, 7 :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, $sp, 7 :: (load (s8) from %ir.object, align 2) @@ -99,9 +100,10 @@ ; CHECK: liveins: $p0 ; CHECK-NEXT: {{ 
$}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, $sp, -8 :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, $sp, -8 :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, $sp, -8 :: (load (s8) from %ir.object, align 2) @@ -154,9 +156,10 @@ ; CHECK: liveins: $p0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, killed $x8, 7 :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4 @@ -222,9 +225,10 @@ ; CHECK: liveins: $p0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; 
CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 ; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, killed $x8, -8 :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4 diff --git a/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir b/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir --- a/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir +++ b/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir @@ -44,9 +44,10 @@ ; CHECK: liveins: $p0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, $sp, 7 :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, $sp, 7 :: (load (s16) from %ir.object) ; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, $sp, 7 :: (load (s32) from %ir.object, align 8) @@ -83,9 +84,10 @@ ; CHECK: liveins: $p0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, $sp, -8 :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, $sp, -8 :: (load (s16) from 
%ir.object) ; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, $sp, -8 :: (load (s32) from %ir.object) @@ -122,9 +124,10 @@ ; CHECK: liveins: $p0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, killed $x8, 7 :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1 @@ -169,9 +172,10 @@ ; CHECK: liveins: $p0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 + ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4 ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 - ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 ; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, killed $x8, -8 :: (load (s8) from %ir.object, align 2) ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1 diff --git a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll --- a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll +++ b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll @@ -50,19 +50,20 @@ ; CHECK-LABEL: add_nxv64i1: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG -; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: ptrue p8.b ; CHECK-NEXT: ldr p4, [x1] ; CHECK-NEXT: ldr p5, [x0] ; CHECK-NEXT: ldr p6, [x3] ; CHECK-NEXT: ldr p7, [x2] -; CHECK-NEXT: ptrue p8.b ; CHECK-NEXT: eor p0.b, p8/z, p0.b, p5.b ; CHECK-NEXT: eor p1.b, p8/z, p1.b, p4.b ; CHECK-NEXT: eor p2.b, p8/z, p2.b, p7.b @@ -130,19 +131,20 @@ ; CHECK-LABEL: sub_nxv64i1: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG ; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG -; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: ptrue p8.b ; CHECK-NEXT: ldr p4, [x1] ; CHECK-NEXT: ldr p5, [x0] ; CHECK-NEXT: ldr p6, [x3] ; CHECK-NEXT: ldr p7, [x2] -; CHECK-NEXT: ptrue p8.b ; CHECK-NEXT: eor p0.b, p8/z, p0.b, p5.b ; CHECK-NEXT: eor p1.b, p8/z, p1.b, p4.b ; CHECK-NEXT: eor p2.b, p8/z, p2.b, p7.b diff --git a/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll b/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll --- a/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll +++ b/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll @@ -20,9 +20,10 @@ ; CHECK-LABEL: split_extract_32i8_idx: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, #-1 ; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 ; CHECK-NEXT: sxtw x9, w0 @@ -45,9 +46,10 @@ ; CHECK-LABEL: split_extract_16i16_idx: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, #-1 ; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 ; CHECK-NEXT: sxtw x9, w0 @@ -70,9 +72,10 @@ ; CHECK-LABEL: split_extract_8i32_idx: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 ; CHECK-NEXT: sxtw x9, w0 @@ -95,9 +98,10 @@ ; CHECK-LABEL: split_extract_8i64_idx: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-4 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 ; CHECK-NEXT: sxtw x9, w0 @@ -142,9 +146,10 @@ ; CHECK-LABEL: split_extract_16i16: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, #-1 ; CHECK-NEXT: mov w9, #128 ; CHECK-NEXT: ptrue p0.h @@ -166,9 +171,10 @@ ; CHECK-LABEL: split_extract_16i32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-4 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, #-1 ; CHECK-NEXT: mov w9, #34464 ; CHECK-NEXT: movk w9, #1, lsl #16 @@ -193,9 +199,10 @@ ; CHECK-LABEL: split_extract_4i64: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cntw x8 ; CHECK-NEXT: mov w9, #10 ; CHECK-NEXT: sub x8, x8, #1 diff --git a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll --- a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll +++ b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll @@ -20,9 +20,10 @@ ; CHECK-LABEL: split_insert_32i8_idx: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, #-1 ; CHECK-NEXT: mov x9, sp ; CHECK-NEXT: ptrue p0.b @@ -45,9 +46,10 @@ ; CHECK-LABEL: split_insert_8f32_idx: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov x9, sp ; CHECK-NEXT: sub x8, x8, #1 @@ -70,9 +72,10 @@ ; CHECK-LABEL: split_insert_8i64_idx: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-4 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov x9, sp ; CHECK-NEXT: sub x8, x8, #1 @@ -132,9 +135,10 @@ ; CHECK-LABEL: split_insert_32i16: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-4 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: mov x8, #-1 ; CHECK-NEXT: mov w9, #128 ; CHECK-NEXT: ptrue p0.h @@ -162,9 +166,10 @@ ; CHECK-LABEL: split_insert_8i32: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 ; CHECK-NEXT: mov w9, #16960 ; CHECK-NEXT: movk w9, #15, lsl #16 diff --git a/llvm/test/CodeGen/AArch64/sve-split-int-pred-reduce.ll b/llvm/test/CodeGen/AArch64/sve-split-int-pred-reduce.ll --- a/llvm/test/CodeGen/AArch64/sve-split-int-pred-reduce.ll +++ b/llvm/test/CodeGen/AArch64/sve-split-int-pred-reduce.ll @@ -20,12 +20,13 @@ ; CHECK-LABEL: andv_nxv64i1: ; CHECK: // %bb.0: ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: and p1.b, p1/z, p1.b, p3.b ; CHECK-NEXT: and p0.b, p0/z, p0.b, p2.b +; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: ptrue p4.b ; CHECK-NEXT: and p0.b, p0/z, p0.b, p1.b ; CHECK-NEXT: not p0.b, p4/z, p0.b diff --git a/llvm/test/CodeGen/AArch64/sve-trunc.ll b/llvm/test/CodeGen/AArch64/sve-trunc.ll --- a/llvm/test/CodeGen/AArch64/sve-trunc.ll +++ b/llvm/test/CodeGen/AArch64/sve-trunc.ll @@ -110,16 +110,16 @@ ; CHECK-LABEL: trunc_i64toi1_split3: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG -; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: and z7.d, z7.d, #0x1 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: and z6.d, z6.d, #0x1 ; CHECK-NEXT: and z5.d, z5.d, #0x1 ; CHECK-NEXT: and z4.d, z4.d, #0x1 +; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: cmpne p1.d, p0/z, z7.d, #0 ; CHECK-NEXT: cmpne p2.d, p0/z, z6.d, #0 ; CHECK-NEXT: cmpne p3.d, p0/z, z5.d, #0 @@ -128,6 +128,7 @@ ; CHECK-NEXT: and z2.d, z2.d, #0x1 ; CHECK-NEXT: and z1.d, z1.d, #0x1 ; CHECK-NEXT: and z0.d, z0.d, #0x1 +; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: uzp1 p1.s, p2.s, p1.s ; CHECK-NEXT: uzp1 p2.s, p4.s, p3.s ; CHECK-NEXT: cmpne p3.d, p0/z, z3.d, #0 diff --git a/llvm/test/CodeGen/AArch64/swifterror.ll b/llvm/test/CodeGen/AArch64/swifterror.ll --- a/llvm/test/CodeGen/AArch64/swifterror.ll +++ b/llvm/test/CodeGen/AArch64/swifterror.ll @@ -14,6 +14,7 @@ ; CHECK-APPLE-LABEL: foo: ; CHECK-APPLE: ; %bb.0: ; %entry ; CHECK-APPLE-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill +; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 16 ; CHECK-APPLE-NEXT: mov x29, sp ; CHECK-APPLE-NEXT: .cfi_def_cfa w29, 16 ; CHECK-APPLE-NEXT: .cfi_offset w30, -8 @@ -30,6 +31,7 @@ ; CHECK-O0-AARCH64-LABEL: foo: ; CHECK-O0-AARCH64: ; %bb.0: ; %entry ; CHECK-O0-AARCH64-NEXT: stp x29, x30, [sp, #-16]! 
; 16-byte Folded Spill +; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa_offset 16 ; CHECK-O0-AARCH64-NEXT: mov x29, sp ; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa w29, 16 ; CHECK-O0-AARCH64-NEXT: .cfi_offset w30, -8 @@ -73,6 +75,7 @@ ; CHECK-APPLE-AARCH64-LABEL: caller: ; CHECK-APPLE-AARCH64: ; %bb.0: ; %entry ; CHECK-APPLE-AARCH64-NEXT: sub sp, sp, #64 +; CHECK-APPLE-AARCH64-NEXT: .cfi_def_cfa_offset 64 ; CHECK-APPLE-AARCH64-NEXT: stp x22, x21, [sp, #16] ; 16-byte Folded Spill ; CHECK-APPLE-AARCH64-NEXT: stp x20, x19, [sp, #32] ; 16-byte Folded Spill ; CHECK-APPLE-AARCH64-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill @@ -104,6 +107,7 @@ ; CHECK-O0-AARCH64-LABEL: caller: ; CHECK-O0-AARCH64: ; %bb.0: ; %entry ; CHECK-O0-AARCH64-NEXT: sub sp, sp, #64 +; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa_offset 64 ; CHECK-O0-AARCH64-NEXT: stp x22, x21, [sp, #32] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: add x29, sp, #48 @@ -135,6 +139,7 @@ ; CHECK-APPLE-ARM64_32-LABEL: caller: ; CHECK-APPLE-ARM64_32: ; %bb.0: ; %entry ; CHECK-APPLE-ARM64_32-NEXT: sub sp, sp, #64 +; CHECK-APPLE-ARM64_32-NEXT: .cfi_def_cfa_offset 64 ; CHECK-APPLE-ARM64_32-NEXT: stp x22, x21, [sp, #16] ; 16-byte Folded Spill ; CHECK-APPLE-ARM64_32-NEXT: stp x20, x19, [sp, #32] ; 16-byte Folded Spill ; CHECK-APPLE-ARM64_32-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill @@ -166,9 +171,9 @@ ; CHECK-O0-ARM64_32-LABEL: caller: ; CHECK-O0-ARM64_32: ; %bb.0: ; %entry ; CHECK-O0-ARM64_32-NEXT: sub sp, sp, #64 +; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 64 ; CHECK-O0-ARM64_32-NEXT: stp x22, x21, [sp, #32] ; 16-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill -; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 64 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w30, -8 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w29, -16 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w21, -24 @@ -219,6 +224,7 @@ ; CHECK-APPLE-AARCH64-LABEL: caller2: ; 
CHECK-APPLE-AARCH64: ; %bb.0: ; %entry ; CHECK-APPLE-AARCH64-NEXT: sub sp, sp, #80 +; CHECK-APPLE-AARCH64-NEXT: .cfi_def_cfa_offset 80 ; CHECK-APPLE-AARCH64-NEXT: stp d9, d8, [sp, #16] ; 16-byte Folded Spill ; CHECK-APPLE-AARCH64-NEXT: stp x22, x21, [sp, #32] ; 16-byte Folded Spill ; CHECK-APPLE-AARCH64-NEXT: stp x20, x19, [sp, #48] ; 16-byte Folded Spill @@ -261,6 +267,7 @@ ; CHECK-O0-AARCH64-LABEL: caller2: ; CHECK-O0-AARCH64: ; %bb.0: ; %entry ; CHECK-O0-AARCH64-NEXT: sub sp, sp, #64 +; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa_offset 64 ; CHECK-O0-AARCH64-NEXT: stp x22, x21, [sp, #32] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: add x29, sp, #48 @@ -301,6 +308,7 @@ ; CHECK-APPLE-ARM64_32-LABEL: caller2: ; CHECK-APPLE-ARM64_32: ; %bb.0: ; %entry ; CHECK-APPLE-ARM64_32-NEXT: sub sp, sp, #80 +; CHECK-APPLE-ARM64_32-NEXT: .cfi_def_cfa_offset 80 ; CHECK-APPLE-ARM64_32-NEXT: stp d9, d8, [sp, #16] ; 16-byte Folded Spill ; CHECK-APPLE-ARM64_32-NEXT: stp x22, x21, [sp, #32] ; 16-byte Folded Spill ; CHECK-APPLE-ARM64_32-NEXT: stp x20, x19, [sp, #48] ; 16-byte Folded Spill @@ -343,9 +351,9 @@ ; CHECK-O0-ARM64_32-LABEL: caller2: ; CHECK-O0-ARM64_32: ; %bb.0: ; %entry ; CHECK-O0-ARM64_32-NEXT: sub sp, sp, #64 +; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 64 ; CHECK-O0-ARM64_32-NEXT: stp x22, x21, [sp, #32] ; 16-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill -; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 64 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w30, -8 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w29, -16 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w21, -24 @@ -411,6 +419,7 @@ ; CHECK-APPLE-LABEL: foo_if: ; CHECK-APPLE: ; %bb.0: ; %entry ; CHECK-APPLE-NEXT: stp x29, x30, [sp, #-16]! 
; 16-byte Folded Spill +; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 16 ; CHECK-APPLE-NEXT: mov x29, sp ; CHECK-APPLE-NEXT: .cfi_def_cfa w29, 16 ; CHECK-APPLE-NEXT: .cfi_offset w30, -8 @@ -433,6 +442,7 @@ ; CHECK-O0-AARCH64-LABEL: foo_if: ; CHECK-O0-AARCH64: ; %bb.0: ; %entry ; CHECK-O0-AARCH64-NEXT: sub sp, sp, #32 +; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa_offset 32 ; CHECK-O0-AARCH64-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: add x29, sp, #16 ; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa w29, 16 @@ -461,8 +471,8 @@ ; CHECK-O0-ARM64_32-LABEL: foo_if: ; CHECK-O0-ARM64_32: ; %bb.0: ; %entry ; CHECK-O0-ARM64_32-NEXT: sub sp, sp, #32 -; CHECK-O0-ARM64_32-NEXT: str x30, [sp, #16] ; 8-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 32 +; CHECK-O0-ARM64_32-NEXT: str x30, [sp, #16] ; 8-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w30, -16 ; CHECK-O0-ARM64_32-NEXT: str x21, [sp, #8] ; 8-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: cbz w0, LBB3_2 @@ -508,6 +518,7 @@ ; CHECK-APPLE-LABEL: foo_loop: ; CHECK-APPLE: ; %bb.0: ; %entry ; CHECK-APPLE-NEXT: stp d9, d8, [sp, #-48]! 
; 16-byte Folded Spill +; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 48 ; CHECK-APPLE-NEXT: stp x20, x19, [sp, #16] ; 16-byte Folded Spill ; CHECK-APPLE-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill ; CHECK-APPLE-NEXT: add x29, sp, #32 @@ -548,6 +559,7 @@ ; CHECK-O0-AARCH64-LABEL: foo_loop: ; CHECK-O0-AARCH64: ; %bb.0: ; %entry ; CHECK-O0-AARCH64-NEXT: sub sp, sp, #48 +; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa_offset 48 ; CHECK-O0-AARCH64-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: add x29, sp, #32 ; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa w29, 16 @@ -591,8 +603,8 @@ ; CHECK-O0-ARM64_32-LABEL: foo_loop: ; CHECK-O0-ARM64_32: ; %bb.0: ; %entry ; CHECK-O0-ARM64_32-NEXT: sub sp, sp, #48 -; CHECK-O0-ARM64_32-NEXT: str x30, [sp, #32] ; 8-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 48 +; CHECK-O0-ARM64_32-NEXT: str x30, [sp, #32] ; 8-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w30, -16 ; CHECK-O0-ARM64_32-NEXT: str s0, [sp, #16] ; 4-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: str w0, [sp, #20] ; 4-byte Folded Spill @@ -667,6 +679,7 @@ ; CHECK-APPLE-LABEL: foo_sret: ; CHECK-APPLE: ; %bb.0: ; %entry ; CHECK-APPLE-NEXT: stp x20, x19, [sp, #-32]! 
; 16-byte Folded Spill +; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 32 ; CHECK-APPLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; CHECK-APPLE-NEXT: add x29, sp, #16 ; CHECK-APPLE-NEXT: .cfi_def_cfa w29, 16 @@ -689,6 +702,7 @@ ; CHECK-O0-AARCH64-LABEL: foo_sret: ; CHECK-O0-AARCH64: ; %bb.0: ; %entry ; CHECK-O0-AARCH64-NEXT: sub sp, sp, #32 +; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa_offset 32 ; CHECK-O0-AARCH64-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: add x29, sp, #16 ; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa w29, 16 @@ -713,8 +727,8 @@ ; CHECK-O0-ARM64_32-LABEL: foo_sret: ; CHECK-O0-ARM64_32: ; %bb.0: ; %entry ; CHECK-O0-ARM64_32-NEXT: sub sp, sp, #32 -; CHECK-O0-ARM64_32-NEXT: str x30, [sp, #16] ; 8-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 32 +; CHECK-O0-ARM64_32-NEXT: str x30, [sp, #16] ; 8-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w30, -16 ; CHECK-O0-ARM64_32-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: str x8, [sp] ; 8-byte Folded Spill @@ -750,6 +764,7 @@ ; CHECK-APPLE-AARCH64-LABEL: caller3: ; CHECK-APPLE-AARCH64: ; %bb.0: ; %entry ; CHECK-APPLE-AARCH64-NEXT: sub sp, sp, #80 +; CHECK-APPLE-AARCH64-NEXT: .cfi_def_cfa_offset 80 ; CHECK-APPLE-AARCH64-NEXT: stp x22, x21, [sp, #32] ; 16-byte Folded Spill ; CHECK-APPLE-AARCH64-NEXT: stp x20, x19, [sp, #48] ; 16-byte Folded Spill ; CHECK-APPLE-AARCH64-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill @@ -783,6 +798,7 @@ ; CHECK-O0-AARCH64-LABEL: caller3: ; CHECK-O0-AARCH64: ; %bb.0: ; %entry ; CHECK-O0-AARCH64-NEXT: sub sp, sp, #80 +; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa_offset 80 ; CHECK-O0-AARCH64-NEXT: stp x22, x21, [sp, #48] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: add x29, sp, #64 @@ -816,6 +832,7 @@ ; CHECK-APPLE-ARM64_32-LABEL: caller3: ; CHECK-APPLE-ARM64_32: ; %bb.0: ; %entry ; CHECK-APPLE-ARM64_32-NEXT: sub sp, sp, #80 +; 
CHECK-APPLE-ARM64_32-NEXT: .cfi_def_cfa_offset 80 ; CHECK-APPLE-ARM64_32-NEXT: stp x22, x21, [sp, #32] ; 16-byte Folded Spill ; CHECK-APPLE-ARM64_32-NEXT: stp x20, x19, [sp, #48] ; 16-byte Folded Spill ; CHECK-APPLE-ARM64_32-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill @@ -849,9 +866,9 @@ ; CHECK-O0-ARM64_32-LABEL: caller3: ; CHECK-O0-ARM64_32: ; %bb.0: ; %entry ; CHECK-O0-ARM64_32-NEXT: sub sp, sp, #80 +; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 80 ; CHECK-O0-ARM64_32-NEXT: stp x22, x21, [sp, #48] ; 16-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill -; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 80 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w30, -8 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w29, -16 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w21, -24 @@ -910,6 +927,7 @@ ; CHECK-APPLE-AARCH64-LABEL: foo_vararg: ; CHECK-APPLE-AARCH64: ; %bb.0: ; %entry ; CHECK-APPLE-AARCH64-NEXT: sub sp, sp, #48 +; CHECK-APPLE-AARCH64-NEXT: .cfi_def_cfa_offset 48 ; CHECK-APPLE-AARCH64-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill ; CHECK-APPLE-AARCH64-NEXT: add x29, sp, #32 ; CHECK-APPLE-AARCH64-NEXT: .cfi_def_cfa w29, 16 @@ -936,6 +954,7 @@ ; CHECK-O0-AARCH64-LABEL: foo_vararg: ; CHECK-O0-AARCH64: ; %bb.0: ; %entry ; CHECK-O0-AARCH64-NEXT: sub sp, sp, #48 +; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa_offset 48 ; CHECK-O0-AARCH64-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: add x29, sp, #32 ; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa w29, 16 @@ -972,6 +991,7 @@ ; CHECK-APPLE-ARM64_32-LABEL: foo_vararg: ; CHECK-APPLE-ARM64_32: ; %bb.0: ; %entry ; CHECK-APPLE-ARM64_32-NEXT: sub sp, sp, #48 +; CHECK-APPLE-ARM64_32-NEXT: .cfi_def_cfa_offset 48 ; CHECK-APPLE-ARM64_32-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill ; CHECK-APPLE-ARM64_32-NEXT: add x29, sp, #32 ; CHECK-APPLE-ARM64_32-NEXT: .cfi_def_cfa w29, 16 @@ -1003,8 +1023,8 @@ ; CHECK-O0-ARM64_32-LABEL: foo_vararg: ; CHECK-O0-ARM64_32: ; %bb.0: ; %entry ; 
CHECK-O0-ARM64_32-NEXT: sub sp, sp, #48 -; CHECK-O0-ARM64_32-NEXT: str x30, [sp, #32] ; 8-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 48 +; CHECK-O0-ARM64_32-NEXT: str x30, [sp, #32] ; 8-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w30, -16 ; CHECK-O0-ARM64_32-NEXT: mov w8, #16 ; CHECK-O0-ARM64_32-NEXT: mov w0, w8 @@ -1075,6 +1095,7 @@ ; CHECK-APPLE-AARCH64-LABEL: caller4: ; CHECK-APPLE-AARCH64: ; %bb.0: ; %entry ; CHECK-APPLE-AARCH64-NEXT: sub sp, sp, #96 +; CHECK-APPLE-AARCH64-NEXT: .cfi_def_cfa_offset 96 ; CHECK-APPLE-AARCH64-NEXT: stp x22, x21, [sp, #48] ; 16-byte Folded Spill ; CHECK-APPLE-AARCH64-NEXT: stp x20, x19, [sp, #64] ; 16-byte Folded Spill ; CHECK-APPLE-AARCH64-NEXT: stp x29, x30, [sp, #80] ; 16-byte Folded Spill @@ -1113,6 +1134,7 @@ ; CHECK-O0-AARCH64-LABEL: caller4: ; CHECK-O0-AARCH64: ; %bb.0: ; %entry ; CHECK-O0-AARCH64-NEXT: sub sp, sp, #96 +; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa_offset 96 ; CHECK-O0-AARCH64-NEXT: stp x22, x21, [sp, #64] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: stp x29, x30, [sp, #80] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: add x29, sp, #80 @@ -1160,6 +1182,7 @@ ; CHECK-APPLE-ARM64_32-LABEL: caller4: ; CHECK-APPLE-ARM64_32: ; %bb.0: ; %entry ; CHECK-APPLE-ARM64_32-NEXT: sub sp, sp, #80 +; CHECK-APPLE-ARM64_32-NEXT: .cfi_def_cfa_offset 80 ; CHECK-APPLE-ARM64_32-NEXT: stp x22, x21, [sp, #32] ; 16-byte Folded Spill ; CHECK-APPLE-ARM64_32-NEXT: stp x20, x19, [sp, #48] ; 16-byte Folded Spill ; CHECK-APPLE-ARM64_32-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill @@ -1200,9 +1223,9 @@ ; CHECK-O0-ARM64_32-LABEL: caller4: ; CHECK-O0-ARM64_32: ; %bb.0: ; %entry ; CHECK-O0-ARM64_32-NEXT: sub sp, sp, #80 +; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 80 ; CHECK-O0-ARM64_32-NEXT: stp x22, x21, [sp, #48] ; 16-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill -; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 80 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w30, -8 
; CHECK-O0-ARM64_32-NEXT: .cfi_offset w29, -16 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w21, -24 @@ -1279,6 +1302,7 @@ ; CHECK-APPLE-LABEL: tailcallswifterror: ; CHECK-APPLE: ; %bb.0: ; %entry ; CHECK-APPLE-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill +; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 16 ; CHECK-APPLE-NEXT: mov x29, sp ; CHECK-APPLE-NEXT: .cfi_def_cfa w29, 16 ; CHECK-APPLE-NEXT: .cfi_offset w30, -8 @@ -1290,6 +1314,7 @@ ; CHECK-O0-AARCH64-LABEL: tailcallswifterror: ; CHECK-O0-AARCH64: ; %bb.0: ; %entry ; CHECK-O0-AARCH64-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill +; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa_offset 16 ; CHECK-O0-AARCH64-NEXT: mov x29, sp ; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa w29, 16 ; CHECK-O0-AARCH64-NEXT: .cfi_offset w30, -8 @@ -1314,6 +1339,7 @@ ; CHECK-APPLE-LABEL: tailcallswifterror_swiftcc: ; CHECK-APPLE: ; %bb.0: ; %entry ; CHECK-APPLE-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill +; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 16 ; CHECK-APPLE-NEXT: mov x29, sp ; CHECK-APPLE-NEXT: .cfi_def_cfa w29, 16 ; CHECK-APPLE-NEXT: .cfi_offset w30, -8 @@ -1325,6 +1351,7 @@ ; CHECK-O0-AARCH64-LABEL: tailcallswifterror_swiftcc: ; CHECK-O0-AARCH64: ; %bb.0: ; %entry ; CHECK-O0-AARCH64-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill +; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa_offset 16 ; CHECK-O0-AARCH64-NEXT: mov x29, sp ; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa w29, 16 ; CHECK-O0-AARCH64-NEXT: .cfi_offset w30, -8 @@ -1350,6 +1377,7 @@ ; CHECK-APPLE-LABEL: swifterror_clobber: ; CHECK-APPLE: ; %bb.0: ; CHECK-APPLE-NEXT: stp x29, x30, [sp, #-16]! 
; 16-byte Folded Spill +; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 16 ; CHECK-APPLE-NEXT: mov x29, sp ; CHECK-APPLE-NEXT: .cfi_def_cfa w29, 16 ; CHECK-APPLE-NEXT: .cfi_offset w30, -8 @@ -1365,6 +1393,7 @@ ; CHECK-O0-AARCH64-LABEL: swifterror_clobber: ; CHECK-O0-AARCH64: ; %bb.0: ; CHECK-O0-AARCH64-NEXT: sub sp, sp, #32 +; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa_offset 32 ; CHECK-O0-AARCH64-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: add x29, sp, #16 ; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa w29, 16 @@ -1398,6 +1427,7 @@ ; CHECK-APPLE-LABEL: swifterror_reg_clobber: ; CHECK-APPLE: ; %bb.0: ; CHECK-APPLE-NEXT: stp x22, x21, [sp, #-32]! ; 16-byte Folded Spill +; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 32 ; CHECK-APPLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; CHECK-APPLE-NEXT: add x29, sp, #16 ; CHECK-APPLE-NEXT: .cfi_def_cfa w29, 16 @@ -1415,6 +1445,7 @@ ; CHECK-O0-AARCH64-LABEL: swifterror_reg_clobber: ; CHECK-O0-AARCH64: ; %bb.0: ; CHECK-O0-AARCH64-NEXT: stp x22, x21, [sp, #-32]! 
; 16-byte Folded Spill +; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa_offset 32 ; CHECK-O0-AARCH64-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: add x29, sp, #16 ; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa w29, 16 @@ -1448,6 +1479,7 @@ ; CHECK-APPLE-LABEL: params_in_reg: ; CHECK-APPLE: ; %bb.0: ; CHECK-APPLE-NEXT: sub sp, sp, #112 +; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 112 ; CHECK-APPLE-NEXT: stp x21, x28, [sp, #8] ; 16-byte Folded Spill ; CHECK-APPLE-NEXT: stp x27, x26, [sp, #32] ; 16-byte Folded Spill ; CHECK-APPLE-NEXT: stp x25, x24, [sp, #48] ; 16-byte Folded Spill @@ -1509,6 +1541,7 @@ ; CHECK-O0-AARCH64-LABEL: params_in_reg: ; CHECK-O0-AARCH64: ; %bb.0: ; CHECK-O0-AARCH64-NEXT: sub sp, sp, #128 +; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa_offset 128 ; CHECK-O0-AARCH64-NEXT: str x20, [sp, #96] ; 8-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: stp x29, x30, [sp, #112] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: add x29, sp, #112 @@ -1566,8 +1599,8 @@ ; CHECK-O0-ARM64_32-LABEL: params_in_reg: ; CHECK-O0-ARM64_32: ; %bb.0: ; CHECK-O0-ARM64_32-NEXT: sub sp, sp, #112 -; CHECK-O0-ARM64_32-NEXT: stp x20, x30, [sp, #96] ; 16-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 112 +; CHECK-O0-ARM64_32-NEXT: stp x20, x30, [sp, #96] ; 16-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w30, -8 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w20, -16 ; CHECK-O0-ARM64_32-NEXT: str x21, [sp, #80] ; 8-byte Folded Spill @@ -1627,6 +1660,7 @@ ; CHECK-APPLE-LABEL: params_and_return_in_reg: ; CHECK-APPLE: ; %bb.0: ; CHECK-APPLE-NEXT: sub sp, sp, #128 +; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 128 ; CHECK-APPLE-NEXT: stp x20, x28, [sp, #24] ; 16-byte Folded Spill ; CHECK-APPLE-NEXT: stp x27, x26, [sp, #48] ; 16-byte Folded Spill ; CHECK-APPLE-NEXT: stp x25, x24, [sp, #64] ; 16-byte Folded Spill @@ -1717,6 +1751,7 @@ ; CHECK-O0-AARCH64-LABEL: params_and_return_in_reg: ; CHECK-O0-AARCH64: ; %bb.0: ; CHECK-O0-AARCH64-NEXT: sub sp, sp, #272 +; 
CHECK-O0-AARCH64-NEXT: .cfi_def_cfa_offset 272 ; CHECK-O0-AARCH64-NEXT: stp x28, x20, [sp, #240] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: stp x29, x30, [sp, #256] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: add x29, sp, #256 @@ -1824,9 +1859,9 @@ ; CHECK-O0-ARM64_32-LABEL: params_and_return_in_reg: ; CHECK-O0-ARM64_32: ; %bb.0: ; CHECK-O0-ARM64_32-NEXT: sub sp, sp, #272 +; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 272 ; CHECK-O0-ARM64_32-NEXT: str x28, [sp, #240] ; 8-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: stp x20, x30, [sp, #256] ; 16-byte Folded Spill -; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 272 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w30, -8 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w20, -16 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w28, -32 @@ -1943,6 +1978,7 @@ ; CHECK-APPLE-LABEL: tailcall_from_swifterror: ; CHECK-APPLE: ; %bb.0: ; %entry ; CHECK-APPLE-NEXT: str x19, [sp, #-32]! ; 8-byte Folded Spill +; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 32 ; CHECK-APPLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; CHECK-APPLE-NEXT: add x29, sp, #16 ; CHECK-APPLE-NEXT: .cfi_def_cfa w29, 16 @@ -1960,6 +1996,7 @@ ; CHECK-O0-AARCH64-LABEL: tailcall_from_swifterror: ; CHECK-O0-AARCH64: ; %bb.0: ; %entry ; CHECK-O0-AARCH64-NEXT: sub sp, sp, #32 +; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa_offset 32 ; CHECK-O0-AARCH64-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: add x29, sp, #16 ; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa w29, 16 @@ -1976,8 +2013,8 @@ ; CHECK-O0-ARM64_32-LABEL: tailcall_from_swifterror: ; CHECK-O0-ARM64_32: ; %bb.0: ; %entry ; CHECK-O0-ARM64_32-NEXT: sub sp, sp, #32 -; CHECK-O0-ARM64_32-NEXT: str x30, [sp, #16] ; 8-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 32 +; CHECK-O0-ARM64_32-NEXT: str x30, [sp, #16] ; 8-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w30, -16 ; CHECK-O0-ARM64_32-NEXT: str x21, [sp, #8] ; 8-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: mov x0, xzr @@ -1998,6 +2035,7 @@ ; 
CHECK-APPLE-LABEL: testAssign: ; CHECK-APPLE: ; %bb.0: ; %entry ; CHECK-APPLE-NEXT: sub sp, sp, #48 +; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 48 ; CHECK-APPLE-NEXT: stp x22, x21, [sp, #16] ; 16-byte Folded Spill ; CHECK-APPLE-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill ; CHECK-APPLE-NEXT: add x29, sp, #32 @@ -2017,6 +2055,7 @@ ; CHECK-O0-AARCH64-LABEL: testAssign: ; CHECK-O0-AARCH64: ; %bb.0: ; %entry ; CHECK-O0-AARCH64-NEXT: sub sp, sp, #48 +; CHECK-O0-AARCH64-NEXT: .cfi_def_cfa_offset 48 ; CHECK-O0-AARCH64-NEXT: stp x22, x21, [sp, #16] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill ; CHECK-O0-AARCH64-NEXT: add x29, sp, #32 @@ -2039,9 +2078,9 @@ ; CHECK-O0-ARM64_32-LABEL: testAssign: ; CHECK-O0-ARM64_32: ; %bb.0: ; %entry ; CHECK-O0-ARM64_32-NEXT: sub sp, sp, #48 +; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 48 ; CHECK-O0-ARM64_32-NEXT: stp x22, x21, [sp, #16] ; 16-byte Folded Spill ; CHECK-O0-ARM64_32-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill -; CHECK-O0-ARM64_32-NEXT: .cfi_def_cfa_offset 48 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w30, -8 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w29, -16 ; CHECK-O0-ARM64_32-NEXT: .cfi_offset w21, -24 diff --git a/llvm/test/CodeGen/AArch64/unwind-preserved-from-mir.mir b/llvm/test/CodeGen/AArch64/unwind-preserved-from-mir.mir --- a/llvm/test/CodeGen/AArch64/unwind-preserved-from-mir.mir +++ b/llvm/test/CodeGen/AArch64/unwind-preserved-from-mir.mir @@ -53,6 +53,7 @@ ; CHECK: successors: %bb.1, %bb.2 ; CHECK: liveins: $q0, $q22, $q23, $q20, $q21, $q18, $q19, $q16, $q17, $q14, $q15, $q12, $q13, $q10, $q11, $q8, $q9, $lr, $fp ; CHECK: $sp = frame-setup SUBXri $sp, 304, 0 + ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 304 ; CHECK: frame-setup STPQi killed $q23, killed $q22, $sp, 2 :: (store (s128) into %stack.19), (store (s128) into %stack.18) ; CHECK: frame-setup STPQi killed $q21, killed $q20, $sp, 4 :: (store (s128) into %stack.17), (store (s128) into %stack.16) ; 
CHECK: frame-setup STPQi killed $q19, killed $q18, $sp, 6 :: (store (s128) into %stack.15), (store (s128) into %stack.14) @@ -62,7 +63,6 @@ ; CHECK: frame-setup STPQi killed $q11, killed $q10, $sp, 14 :: (store (s128) into %stack.7), (store (s128) into %stack.6) ; CHECK: frame-setup STPQi killed $q9, killed $q8, $sp, 16 :: (store (s128) into %stack.5), (store (s128) into %stack.4) ; CHECK: frame-setup STPXi killed $fp, killed $lr, $sp, 36 :: (store (s64) into %stack.3), (store (s64) into %stack.2) - ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 304 ; CHECK: frame-setup CFI_INSTRUCTION offset $w30, -8 ; CHECK: frame-setup CFI_INSTRUCTION offset $w29, -16 ; CHECK: frame-setup CFI_INSTRUCTION offset $b8, -32 diff --git a/llvm/test/CodeGen/AArch64/unwind-preserved.ll b/llvm/test/CodeGen/AArch64/unwind-preserved.ll --- a/llvm/test/CodeGen/AArch64/unwind-preserved.ll +++ b/llvm/test/CodeGen/AArch64/unwind-preserved.ll @@ -9,7 +9,11 @@ ; CHECK-NEXT: .cfi_startproc ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w30, -8 +; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: addvl sp, sp, #-18 +; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG ; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -38,8 +42,6 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK-NEXT: addvl sp, sp, #-2 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG ; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG ; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG ; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG @@ -48,8 +50,8 @@ ; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG ; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG ; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG -; CHECK-NEXT: .cfi_offset w30, -8 -; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG ; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: .Ltmp0: ; 
CHECK-NEXT: bl may_throw_sve @@ -131,7 +133,11 @@ ; GISEL-NEXT: .cfi_startproc ; GISEL-NEXT: // %bb.0: ; GISEL-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill +; GISEL-NEXT: .cfi_def_cfa_offset 16 +; GISEL-NEXT: .cfi_offset w30, -8 +; GISEL-NEXT: .cfi_offset w29, -16 ; GISEL-NEXT: addvl sp, sp, #-18 +; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG ; GISEL-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; GISEL-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; GISEL-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -160,8 +166,6 @@ ; GISEL-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; GISEL-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; GISEL-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; GISEL-NEXT: addvl sp, sp, #-2 -; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG ; GISEL-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG ; GISEL-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG ; GISEL-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG @@ -170,8 +174,8 @@ ; GISEL-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG ; GISEL-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG ; GISEL-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG -; GISEL-NEXT: .cfi_offset w30, -8 -; GISEL-NEXT: .cfi_offset w29, -16 +; GISEL-NEXT: addvl sp, sp, #-2 +; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 
0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG ; GISEL-NEXT: str z0, [sp] // 16-byte Folded Spill ; GISEL-NEXT: .Ltmp0: ; GISEL-NEXT: bl may_throw_sve @@ -265,6 +269,7 @@ ; CHECK-NEXT: .cfi_startproc ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: sub sp, sp, #304 +; CHECK-NEXT: .cfi_def_cfa_offset 304 ; CHECK-NEXT: stp q23, q22, [sp, #32] // 32-byte Folded Spill ; CHECK-NEXT: stp q21, q20, [sp, #64] // 32-byte Folded Spill ; CHECK-NEXT: stp q19, q18, [sp, #96] // 32-byte Folded Spill @@ -274,7 +279,6 @@ ; CHECK-NEXT: stp q11, q10, [sp, #224] // 32-byte Folded Spill ; CHECK-NEXT: stp q9, q8, [sp, #256] // 32-byte Folded Spill ; CHECK-NEXT: stp x29, x30, [sp, #288] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 304 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: .cfi_offset b8, -32 @@ -332,6 +336,7 @@ ; GISEL-NEXT: .cfi_startproc ; GISEL-NEXT: // %bb.0: ; GISEL-NEXT: sub sp, sp, #304 +; GISEL-NEXT: .cfi_def_cfa_offset 304 ; GISEL-NEXT: stp q23, q22, [sp, #32] // 32-byte Folded Spill ; GISEL-NEXT: stp q21, q20, [sp, #64] // 32-byte Folded Spill ; GISEL-NEXT: stp q19, q18, [sp, #96] // 32-byte Folded Spill @@ -341,7 +346,6 @@ ; GISEL-NEXT: stp q11, q10, [sp, #224] // 32-byte Folded Spill ; GISEL-NEXT: stp q9, q8, [sp, #256] // 32-byte Folded Spill ; GISEL-NEXT: stp x29, x30, [sp, #288] // 16-byte Folded Spill -; GISEL-NEXT: .cfi_def_cfa_offset 304 ; GISEL-NEXT: .cfi_offset w30, -8 ; GISEL-NEXT: .cfi_offset w29, -16 ; GISEL-NEXT: .cfi_offset b8, -32 diff --git a/llvm/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll b/llvm/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll --- a/llvm/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll +++ b/llvm/test/Transforms/CodeGenPrepare/AArch64/large-offset-gep.ll @@ -142,8 +142,8 @@ ; CHECK-NEXT: .cfi_lsda 0, .Lexception0 ; CHECK-NEXT: // %bb.0: // %entry ; CHECK-NEXT: stp x30, x21, [sp, #-32]! 
// 16-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w20, -16 ; CHECK-NEXT: .cfi_offset w21, -24 diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected --- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected +++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected @@ -65,6 +65,7 @@ ; CHECK-LABEL: check_boundaries: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill ; CHECK-NEXT: add x29, sp, #32 ; CHECK-NEXT: .cfi_def_cfa w29, 16 @@ -97,6 +98,7 @@ ; CHECK-LABEL: main: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill ; CHECK-NEXT: add x29, sp, #32 ; CHECK-NEXT: .cfi_def_cfa w29, 16 diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected --- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected +++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected @@ -6,6 +6,7 @@ ; CHECK-LABEL: check_boundaries: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill ; CHECK-NEXT: add x29, sp, #32 ; CHECK-NEXT: 
.cfi_def_cfa w29, 16 @@ -74,6 +75,7 @@ ; CHECK-LABEL: main: ; CHECK: // %bb.0: ; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill ; CHECK-NEXT: add x29, sp, #32 ; CHECK-NEXT: .cfi_def_cfa w29, 16