Index: lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp
===================================================================
--- lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp
+++ lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp
@@ -21,6 +21,7 @@
   CommentString = "#";
   AlignmentIsInBytes = false;
   SupportsDebugInformation = true;
+  ExceptionsType = ExceptionHandling::DwarfCFI;
   Data16bitsDirective = "\t.half\t";
   Data32bitsDirective = "\t.word\t";
 }
Index: lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.cpp
===================================================================
--- lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.cpp
+++ lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.cpp
@@ -49,7 +49,13 @@
 
 static MCAsmInfo *createRISCVMCAsmInfo(const MCRegisterInfo &MRI,
                                        const Triple &TT) {
-  return new RISCVMCAsmInfo(TT);
+  MCAsmInfo *MAI = new RISCVMCAsmInfo(TT);
+
+  unsigned SP = MRI.getDwarfRegNum(RISCV::X2, true);
+  MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, SP, 0);
+  MAI->addInitialFrameState(Inst);
+
+  return MAI;
 }
 
 static MCSubtargetInfo *createRISCVMCSubtargetInfo(const Triple &TT,
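Note: with the initial frame state registered above, the unwinder's rule at every function entry is CFA = x2 (sp) + 0. A function that never moves sp therefore needs no prologue directives at all, and the epilogue only restates the entry rule. A minimal sketch (mirroring the alu64.ll check lines updated below):

    addw    a0, a0, a1          # leaf body; sp is never adjusted
    .cfi_def_cfa_offset 0       # epilogue restates the entry rule
    ret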
Index: lib/Target/RISCV/RISCVFrameLowering.cpp
===================================================================
--- lib/Target/RISCV/RISCVFrameLowering.cpp
+++ lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -18,6 +18,7 @@
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/MC/MCDwarf.h"
 
 using namespace llvm;
 
@@ -96,6 +97,8 @@
   MachineFrameInfo &MFI = MF.getFrameInfo();
   auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+  const RISCVRegisterInfo *RI = STI.getRegisterInfo();
+  const RISCVInstrInfo *TII = STI.getInstrInfo();
   MachineBasicBlock::iterator MBBI = MBB.begin();
 
   unsigned FPReg = getFPReg(STI);
@@ -119,6 +122,12 @@
   // Allocate space on the stack if necessary.
   adjustReg(MBB, MBBI, DL, SPReg, SPReg, -StackSize, MachineInstr::FrameSetup);
 
+  // Emit ".cfi_def_cfa_offset StackSize"
+  unsigned CFIIndex = MF.addFrameInst(
+      MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize));
+  BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+      .addCFIIndex(CFIIndex);
+
   // The frame pointer is callee-saved, and code has been generated for us to
   // save it to the stack. We need to skip over the storing of callee-saved
   // registers as the frame pointer must be modified after it has been saved
@@ -128,10 +137,30 @@
   const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
   std::advance(MBBI, CSI.size());
 
+  // Iterate over the list of callee-saved registers and emit .cfi_offset
+  // directives.
+  for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
+                                                    E = CSI.end();
+       I != E; ++I) {
+    int64_t Offset = MFI.getObjectOffset(I->getFrameIdx());
+    unsigned Reg = I->getReg();
+    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
+        nullptr, RI->getDwarfRegNum(Reg, true), Offset));
+    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+        .addCFIIndex(CFIIndex);
+  }
+
   // Generate new FP.
-  if (hasFP(MF))
+  if (hasFP(MF)) {
     adjustReg(MBB, MBBI, DL, FPReg, SPReg,
               StackSize - RVFI->getVarArgsSaveSize(), MachineInstr::FrameSetup);
+
+    // Emit ".cfi_def_cfa $fp, 0"
+    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfa(
+        nullptr, RI->getDwarfRegNum(FPReg, true), 0));
+    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+        .addCFIIndex(CFIIndex);
+  }
 }
 
 void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
@@ -141,6 +170,7 @@
   MachineFrameInfo &MFI = MF.getFrameInfo();
   auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
   DebugLoc DL = MBBI->getDebugLoc();
+  const RISCVInstrInfo *TII = STI.getInstrInfo();
   unsigned FPReg = getFPReg(STI);
   unsigned SPReg = getSPReg(STI);
 
@@ -161,8 +191,25 @@
               MachineInstr::FrameDestroy);
   }
 
+  if (hasFP(MF)) {
+    // If there is a frame pointer, we need to adjust the CFA to ($sp - FPOffset)
+    // before restoring the callee-saved registers. Emit ".cfi_def_cfa $sp, -FPOffset"
+    uint64_t FPOffset = StackSize - RVFI->getVarArgsSaveSize();
+    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfa(
+        nullptr, RI->getDwarfRegNum(SPReg, true), -FPOffset));
+    BuildMI(MBB, LastFrameDestroy, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+        .addCFIIndex(CFIIndex);
+  }
+
   // Deallocate stack
   adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackSize, MachineInstr::FrameDestroy);
+
+  // After restoring $sp, we need to adjust the CFA back to ($sp + 0).
+  // Emit ".cfi_def_cfa_offset 0"
+  unsigned CFIIndex =
+      MF.addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, 0));
+  BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+      .addCFIIndex(CFIIndex);
 }
 
 int RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF,
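Taken together, the prologue and epilogue changes bracket every stack adjustment and callee-saved spill with matching directives. A sketch of the common shape in the tests below (RV32, a 16-byte frame, ra spilled; the offsets are just this common case, not something fixed by the patch):

    addi    sp, sp, -16         # allocate the frame
    .cfi_def_cfa_offset 16      # CFA is now sp + 16
    sw      ra, 12(sp)          # spill the return address
    .cfi_offset ra, -4          # ra lives at CFA - 4
    ...                         # function body
    lw      ra, 12(sp)          # reload the return address
    addi    sp, sp, 16          # deallocate the frame
    .cfi_def_cfa_offset 0       # back to the entry rule: CFA = sp + 0
    ret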
Index: test/CodeGen/RISCV/addc-adde-sube-subc.ll
===================================================================
--- test/CodeGen/RISCV/addc-adde-sube-subc.ll
+++ test/CodeGen/RISCV/addc-adde-sube-subc.ll
@@ -12,6 +12,7 @@
 ; RV32I-NEXT:    sltu a0, a2, a0
 ; RV32I-NEXT:    add a1, a1, a0
 ; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = add i64 %a, %b
   ret i64 %1
@@ -24,6 +25,7 @@
 ; RV32I-NEXT:    sltu a3, a0, a2
 ; RV32I-NEXT:    sub a1, a1, a3
 ; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = sub i64 %a, %b
   ret i64 %1
Index: test/CodeGen/RISCV/alu64.ll
===================================================================
--- test/CodeGen/RISCV/alu64.ll
+++ test/CodeGen/RISCV/alu64.ll
@@ -379,11 +379,13 @@
 ; RV64I-LABEL: addiw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addiw a0, a0, 123
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: addiw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi a0, a0, 123
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = add i32 %a, 123
   ret i32 %1
@@ -393,11 +395,13 @@
 ; RV64I-LABEL: slliw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slliw a0, a0, 17
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: slliw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a0, a0, 17
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = shl i32 %a, 17
   ret i32 %1
@@ -407,11 +411,13 @@
 ; RV64I-LABEL: srliw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    srliw a0, a0, 8
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: srliw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srli a0, a0, 8
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = lshr i32 %a, 8
   ret i32 %1
@@ -421,11 +427,13 @@
 ; RV64I-LABEL: sraiw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sraiw a0, a0, 9
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: sraiw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srai a0, a0, 9
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = ashr i32 %a, 9
   ret i32 %1
@@ -435,10 +443,12 @@
 ; RV64I-LABEL: sextw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: sextw:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   ret i32 %a
 }
@@ -447,11 +457,13 @@
 ; RV64I-LABEL: addw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: addw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = add i32 %a, %b
   ret i32 %1
@@ -461,11 +473,13 @@
 ; RV64I-LABEL: subw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: subw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = sub i32 %a, %b
   ret i32 %1
@@ -475,11 +489,13 @@
 ; RV64I-LABEL: sllw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: sllw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -489,11 +505,13 @@
 ; RV64I-LABEL: srlw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: srlw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srl a0, a0, a1
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -503,11 +521,13 @@
 ; RV64I-LABEL: sraw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: sraw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sra a0, a0, a2
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = trunc i64 %a to i32
   %2 = ashr i32 %1, %b
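None of the tests in this patch exercise the hasFP path, so for completeness here is a hypothetical sketch of the frame-pointer prologue the new code would emit (RV32, a 16-byte frame, no varargs save area; the directive text follows the comments in RISCVFrameLowering.cpp, and the offsets are illustrative assumptions only):

    addi    sp, sp, -16         # allocate the frame
    .cfi_def_cfa_offset 16
    sw      ra, 12(sp)
    .cfi_offset ra, -4
    sw      s0, 8(sp)           # s0 is the frame pointer register
    .cfi_offset s0, -8
    addi    s0, sp, 16          # fp = sp + StackSize - VarArgsSaveSize
    .cfi_def_cfa s0, 0          # CFA now tracks $fp, so later sp moves are safe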
Index: test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
===================================================================
--- test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
+++ test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
@@ -7,7 +7,6 @@
 ; higher bits were masked to zero for the comparison.
 
 define i1 @cmpxchg_i32_seq_cst_seq_cst(i32* %ptr, i32 signext %cmp,
-                                       i32 signext %val) {
 ; RV64IA-LABEL: cmpxchg_i32_seq_cst_seq_cst:
 ; RV64IA:       # %bb.0: # %entry
 ; RV64IA-NEXT:  .LBB0_1: # %entry
@@ -21,7 +20,9 @@
 ; RV64IA-NEXT:  .LBB0_3: # %entry
 ; RV64IA-NEXT:    xor a0, a3, a1
 ; RV64IA-NEXT:    seqz a0, a0
+; RV64IA-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IA-NEXT:    ret
+                                       i32 signext %val) {
 entry:
   %0 = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst seq_cst
   %1 = extractvalue { i32, i1 } %0, 1
Index: test/CodeGen/RISCV/atomic-cmpxchg.ll
===================================================================
--- test/CodeGen/RISCV/atomic-cmpxchg.ll
+++ test/CodeGen/RISCV/atomic-cmpxchg.ll
@@ -12,7 +12,9 @@
 ; RV32I-LABEL: cmpxchg_i8_monotonic_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    .cfi_def_cfa_offset 16
 ; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    mv a3, zero
@@ -20,6 +22,7 @@
 ; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp)
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: cmpxchg_i8_monotonic_monotonic:
@@ -44,12 +47,15 @@
 ; RV32IA-NEXT:    sc.w a5, a5, (a0)
 ; RV32IA-NEXT:    bnez a5, .LBB0_1
 ; RV32IA-NEXT:  .LBB0_3:
+; RV32IA-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: cmpxchg_i8_monotonic_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    .cfi_def_cfa_offset 16
 ; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    mv a3, zero
@@ -57,6 +63,7 @@
 ; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp)
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: cmpxchg_i8_monotonic_monotonic:
@@ -81,6 +88,7 @@
 ; RV64IA-NEXT:    sc.w a5, a5, (a0)
 ; RV64IA-NEXT:    bnez a5, .LBB0_1
 ; RV64IA-NEXT:  .LBB0_3:
+; RV64IA-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IA-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic
   ret void
@@ -90,7 +98,9 @@
 ; RV32I-LABEL: cmpxchg_i8_acquire_monotonic:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    .cfi_def_cfa_offset 16
 ; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
 ; RV32I-NEXT:    addi a3, zero, 2
@@ -98,6 +108,7 @@
 ; RV32I-NEXT:    call __atomic_compare_exchange_1
 ; RV32I-NEXT:    lw ra, 12(sp)
 ; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IA-LABEL: cmpxchg_i8_acquire_monotonic:
@@ -122,12 +133,15 @@
 ; RV32IA-NEXT:    sc.w a5, a5, (a0)
 ; RV32IA-NEXT:    bnez a5, .LBB1_1
 ; RV32IA-NEXT:  .LBB1_3:
+; RV32IA-NEXT:    .cfi_def_cfa_offset 0
 ; RV32IA-NEXT:    ret
 ;
 ; RV64I-LABEL: cmpxchg_i8_acquire_monotonic:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    .cfi_def_cfa_offset 16
 ; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
 ; RV64I-NEXT:    addi a3, zero, 2
@@ -135,6 +149,7 @@
 ; RV64I-NEXT:    call __atomic_compare_exchange_1
 ; RV64I-NEXT:    ld ra, 8(sp)
 ; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IA-LABEL: cmpxchg_i8_acquire_monotonic:
@@ -159,6 +174,7 @@
 ; RV64IA-NEXT:    sc.w a5, a5, (a0)
 ; RV64IA-NEXT:    bnez a5, .LBB1_1
 ; RV64IA-NEXT:  .LBB1_3:
+; RV64IA-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IA-NEXT:    ret
   %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire monotonic
   ret void
@@
-168,7 +184,9 @@ ; RV32I-LABEL: cmpxchg_i8_acquire_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 ; RV32I-NEXT: addi a3, zero, 2 @@ -176,6 +194,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i8_acquire_acquire: @@ -200,12 +219,15 @@ ; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB2_1 ; RV32IA-NEXT: .LBB2_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i8_acquire_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 ; RV64I-NEXT: addi a3, zero, 2 @@ -213,6 +235,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i8_acquire_acquire: @@ -237,6 +260,7 @@ ; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB2_1 ; RV64IA-NEXT: .LBB2_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire acquire ret void @@ -246,7 +270,9 @@ ; RV32I-LABEL: cmpxchg_i8_release_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 ; RV32I-NEXT: addi a3, zero, 3 @@ -254,6 +280,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i8_release_monotonic: @@ -278,12 +305,15 @@ ; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB3_1 ; RV32IA-NEXT: .LBB3_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i8_release_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 ; RV64I-NEXT: addi a3, zero, 3 @@ -291,6 +321,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i8_release_monotonic: @@ -315,6 +346,7 @@ ; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB3_1 ; RV64IA-NEXT: .LBB3_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release monotonic ret void @@ -324,7 +356,9 @@ ; RV32I-LABEL: cmpxchg_i8_release_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 ; RV32I-NEXT: addi a3, zero, 3 @@ -332,6 +366,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i8_release_acquire: @@ -356,12 +391,15 @@ ; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB4_1 ; RV32IA-NEXT: .LBB4_3: +; RV32IA-NEXT: 
.cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i8_release_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 ; RV64I-NEXT: addi a3, zero, 3 @@ -369,6 +407,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i8_release_acquire: @@ -393,6 +432,7 @@ ; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB4_1 ; RV64IA-NEXT: .LBB4_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release acquire ret void @@ -402,7 +442,9 @@ ; RV32I-LABEL: cmpxchg_i8_acq_rel_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 ; RV32I-NEXT: addi a3, zero, 4 @@ -410,6 +452,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i8_acq_rel_monotonic: @@ -434,12 +477,15 @@ ; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB5_1 ; RV32IA-NEXT: .LBB5_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i8_acq_rel_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 ; RV64I-NEXT: addi a3, zero, 4 @@ -447,6 +493,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i8_acq_rel_monotonic: @@ -471,6 +518,7 @@ ; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB5_1 ; RV64IA-NEXT: .LBB5_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel monotonic ret void @@ -480,7 +528,9 @@ ; RV32I-LABEL: cmpxchg_i8_acq_rel_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 ; RV32I-NEXT: addi a3, zero, 4 @@ -488,6 +538,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i8_acq_rel_acquire: @@ -512,12 +563,15 @@ ; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB6_1 ; RV32IA-NEXT: .LBB6_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i8_acq_rel_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 ; RV64I-NEXT: addi a3, zero, 4 @@ -525,6 +579,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i8_acq_rel_acquire: @@ -549,6 +604,7 @@ ; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB6_1 ; RV64IA-NEXT: .LBB6_3: +; 
RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel acquire ret void @@ -558,7 +614,9 @@ ; RV32I-LABEL: cmpxchg_i8_seq_cst_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 ; RV32I-NEXT: addi a3, zero, 5 @@ -566,6 +624,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i8_seq_cst_monotonic: @@ -590,12 +649,15 @@ ; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB7_1 ; RV32IA-NEXT: .LBB7_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i8_seq_cst_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 ; RV64I-NEXT: addi a3, zero, 5 @@ -603,6 +665,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i8_seq_cst_monotonic: @@ -627,6 +690,7 @@ ; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB7_1 ; RV64IA-NEXT: .LBB7_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst monotonic ret void @@ -636,7 +700,9 @@ ; RV32I-LABEL: cmpxchg_i8_seq_cst_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 ; RV32I-NEXT: addi a3, zero, 5 @@ -644,6 +710,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i8_seq_cst_acquire: @@ -668,12 +735,15 @@ ; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB8_1 ; RV32IA-NEXT: .LBB8_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i8_seq_cst_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 ; RV64I-NEXT: addi a3, zero, 5 @@ -681,6 +751,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i8_seq_cst_acquire: @@ -705,6 +776,7 @@ ; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB8_1 ; RV64IA-NEXT: .LBB8_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst acquire ret void @@ -714,7 +786,9 @@ ; RV32I-LABEL: cmpxchg_i8_seq_cst_seq_cst: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sb a1, 11(sp) ; RV32I-NEXT: addi a1, sp, 11 ; RV32I-NEXT: addi a3, zero, 5 @@ -722,6 +796,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i8_seq_cst_seq_cst: @@ -746,12 
+821,15 @@ ; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB9_1 ; RV32IA-NEXT: .LBB9_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i8_seq_cst_seq_cst: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sb a1, 7(sp) ; RV64I-NEXT: addi a1, sp, 7 ; RV64I-NEXT: addi a3, zero, 5 @@ -759,6 +837,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i8_seq_cst_seq_cst: @@ -783,6 +862,7 @@ ; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB9_1 ; RV64IA-NEXT: .LBB9_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst seq_cst ret void @@ -792,7 +872,9 @@ ; RV32I-LABEL: cmpxchg_i16_monotonic_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 ; RV32I-NEXT: mv a3, zero @@ -800,6 +882,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i16_monotonic_monotonic: @@ -825,12 +908,15 @@ ; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB10_1 ; RV32IA-NEXT: .LBB10_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i16_monotonic_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 ; RV64I-NEXT: mv a3, zero @@ -838,6 +924,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i16_monotonic_monotonic: @@ -863,6 +950,7 @@ ; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB10_1 ; RV64IA-NEXT: .LBB10_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic ret void @@ -872,7 +960,9 @@ ; RV32I-LABEL: cmpxchg_i16_acquire_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 ; RV32I-NEXT: addi a3, zero, 2 @@ -880,6 +970,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i16_acquire_monotonic: @@ -905,12 +996,15 @@ ; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB11_1 ; RV32IA-NEXT: .LBB11_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i16_acquire_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 ; RV64I-NEXT: addi a3, zero, 2 @@ -918,6 +1012,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: 
cmpxchg_i16_acquire_monotonic: @@ -943,6 +1038,7 @@ ; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB11_1 ; RV64IA-NEXT: .LBB11_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire monotonic ret void @@ -952,7 +1048,9 @@ ; RV32I-LABEL: cmpxchg_i16_acquire_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 ; RV32I-NEXT: addi a3, zero, 2 @@ -960,6 +1058,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i16_acquire_acquire: @@ -985,12 +1084,15 @@ ; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB12_1 ; RV32IA-NEXT: .LBB12_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i16_acquire_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 ; RV64I-NEXT: addi a3, zero, 2 @@ -998,6 +1100,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i16_acquire_acquire: @@ -1023,6 +1126,7 @@ ; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB12_1 ; RV64IA-NEXT: .LBB12_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire acquire ret void @@ -1032,7 +1136,9 @@ ; RV32I-LABEL: cmpxchg_i16_release_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 ; RV32I-NEXT: addi a3, zero, 3 @@ -1040,6 +1146,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i16_release_monotonic: @@ -1065,12 +1172,15 @@ ; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB13_1 ; RV32IA-NEXT: .LBB13_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i16_release_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 ; RV64I-NEXT: addi a3, zero, 3 @@ -1078,6 +1188,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i16_release_monotonic: @@ -1103,6 +1214,7 @@ ; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB13_1 ; RV64IA-NEXT: .LBB13_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release monotonic ret void @@ -1112,7 +1224,9 @@ ; RV32I-LABEL: cmpxchg_i16_release_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 ; RV32I-NEXT: addi a3, zero, 3 @@ -1120,6 +1234,7 @@ ; RV32I-NEXT: call 
__atomic_compare_exchange_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i16_release_acquire: @@ -1145,12 +1260,15 @@ ; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB14_1 ; RV32IA-NEXT: .LBB14_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i16_release_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 ; RV64I-NEXT: addi a3, zero, 3 @@ -1158,6 +1276,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i16_release_acquire: @@ -1183,6 +1302,7 @@ ; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB14_1 ; RV64IA-NEXT: .LBB14_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release acquire ret void @@ -1192,7 +1312,9 @@ ; RV32I-LABEL: cmpxchg_i16_acq_rel_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 ; RV32I-NEXT: addi a3, zero, 4 @@ -1200,6 +1322,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i16_acq_rel_monotonic: @@ -1225,12 +1348,15 @@ ; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB15_1 ; RV32IA-NEXT: .LBB15_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i16_acq_rel_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 ; RV64I-NEXT: addi a3, zero, 4 @@ -1238,6 +1364,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i16_acq_rel_monotonic: @@ -1263,6 +1390,7 @@ ; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB15_1 ; RV64IA-NEXT: .LBB15_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel monotonic ret void @@ -1272,7 +1400,9 @@ ; RV32I-LABEL: cmpxchg_i16_acq_rel_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 ; RV32I-NEXT: addi a3, zero, 4 @@ -1280,6 +1410,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i16_acq_rel_acquire: @@ -1305,12 +1436,15 @@ ; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB16_1 ; RV32IA-NEXT: .LBB16_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i16_acq_rel_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 ; 
RV64I-NEXT: addi a3, zero, 4 @@ -1318,6 +1452,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i16_acq_rel_acquire: @@ -1343,6 +1478,7 @@ ; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB16_1 ; RV64IA-NEXT: .LBB16_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel acquire ret void @@ -1352,7 +1488,9 @@ ; RV32I-LABEL: cmpxchg_i16_seq_cst_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 ; RV32I-NEXT: addi a3, zero, 5 @@ -1360,6 +1498,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i16_seq_cst_monotonic: @@ -1385,12 +1524,15 @@ ; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB17_1 ; RV32IA-NEXT: .LBB17_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i16_seq_cst_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 ; RV64I-NEXT: addi a3, zero, 5 @@ -1398,6 +1540,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i16_seq_cst_monotonic: @@ -1423,6 +1566,7 @@ ; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB17_1 ; RV64IA-NEXT: .LBB17_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst monotonic ret void @@ -1432,7 +1576,9 @@ ; RV32I-LABEL: cmpxchg_i16_seq_cst_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 ; RV32I-NEXT: addi a3, zero, 5 @@ -1440,6 +1586,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i16_seq_cst_acquire: @@ -1465,12 +1612,15 @@ ; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB18_1 ; RV32IA-NEXT: .LBB18_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i16_seq_cst_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 ; RV64I-NEXT: addi a3, zero, 5 @@ -1478,6 +1628,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i16_seq_cst_acquire: @@ -1503,6 +1654,7 @@ ; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB18_1 ; RV64IA-NEXT: .LBB18_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst acquire ret void @@ -1512,7 +1664,9 @@ ; RV32I-LABEL: cmpxchg_i16_seq_cst_seq_cst: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 
+; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sh a1, 10(sp) ; RV32I-NEXT: addi a1, sp, 10 ; RV32I-NEXT: addi a3, zero, 5 @@ -1520,6 +1674,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i16_seq_cst_seq_cst: @@ -1545,12 +1700,15 @@ ; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB19_1 ; RV32IA-NEXT: .LBB19_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i16_seq_cst_seq_cst: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sh a1, 6(sp) ; RV64I-NEXT: addi a1, sp, 6 ; RV64I-NEXT: addi a3, zero, 5 @@ -1558,6 +1716,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i16_seq_cst_seq_cst: @@ -1583,6 +1742,7 @@ ; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB19_1 ; RV64IA-NEXT: .LBB19_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst seq_cst ret void @@ -1592,7 +1752,9 @@ ; RV32I-LABEL: cmpxchg_i32_monotonic_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: mv a3, zero @@ -1600,6 +1762,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i32_monotonic_monotonic: @@ -1611,12 +1774,15 @@ ; RV32IA-NEXT: sc.w a4, a2, (a0) ; RV32IA-NEXT: bnez a4, .LBB20_1 ; RV32IA-NEXT: .LBB20_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i32_monotonic_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 ; RV64I-NEXT: mv a3, zero @@ -1624,6 +1790,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i32_monotonic_monotonic: @@ -1635,6 +1802,7 @@ ; RV64IA-NEXT: sc.w a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB20_1 ; RV64IA-NEXT: .LBB20_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val monotonic monotonic ret void @@ -1644,7 +1812,9 @@ ; RV32I-LABEL: cmpxchg_i32_acquire_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: addi a3, zero, 2 @@ -1652,6 +1822,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i32_acquire_monotonic: @@ -1663,12 +1834,15 @@ ; RV32IA-NEXT: sc.w a4, a2, (a0) ; RV32IA-NEXT: bnez a4, .LBB21_1 ; RV32IA-NEXT: .LBB21_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: 
cmpxchg_i32_acquire_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 ; RV64I-NEXT: addi a3, zero, 2 @@ -1676,6 +1850,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i32_acquire_monotonic: @@ -1687,6 +1862,7 @@ ; RV64IA-NEXT: sc.w a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB21_1 ; RV64IA-NEXT: .LBB21_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire monotonic ret void @@ -1696,7 +1872,9 @@ ; RV32I-LABEL: cmpxchg_i32_acquire_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: addi a3, zero, 2 @@ -1704,6 +1882,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i32_acquire_acquire: @@ -1715,12 +1894,15 @@ ; RV32IA-NEXT: sc.w a4, a2, (a0) ; RV32IA-NEXT: bnez a4, .LBB22_1 ; RV32IA-NEXT: .LBB22_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i32_acquire_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 ; RV64I-NEXT: addi a3, zero, 2 @@ -1728,6 +1910,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i32_acquire_acquire: @@ -1739,6 +1922,7 @@ ; RV64IA-NEXT: sc.w a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB22_1 ; RV64IA-NEXT: .LBB22_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire acquire ret void @@ -1748,7 +1932,9 @@ ; RV32I-LABEL: cmpxchg_i32_release_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: addi a3, zero, 3 @@ -1756,6 +1942,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i32_release_monotonic: @@ -1767,12 +1954,15 @@ ; RV32IA-NEXT: sc.w.rl a4, a2, (a0) ; RV32IA-NEXT: bnez a4, .LBB23_1 ; RV32IA-NEXT: .LBB23_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i32_release_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 ; RV64I-NEXT: addi a3, zero, 3 @@ -1780,6 +1970,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i32_release_monotonic: @@ -1791,6 +1982,7 @@ ; RV64IA-NEXT: sc.w.rl a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB23_1 ; RV64IA-NEXT: .LBB23_3: +; RV64IA-NEXT: 
.cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release monotonic ret void @@ -1800,7 +1992,9 @@ ; RV32I-LABEL: cmpxchg_i32_release_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: addi a3, zero, 3 @@ -1808,6 +2002,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i32_release_acquire: @@ -1819,12 +2014,15 @@ ; RV32IA-NEXT: sc.w.rl a4, a2, (a0) ; RV32IA-NEXT: bnez a4, .LBB24_1 ; RV32IA-NEXT: .LBB24_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i32_release_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 ; RV64I-NEXT: addi a3, zero, 3 @@ -1832,6 +2030,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i32_release_acquire: @@ -1843,6 +2042,7 @@ ; RV64IA-NEXT: sc.w.rl a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB24_1 ; RV64IA-NEXT: .LBB24_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release acquire ret void @@ -1852,7 +2052,9 @@ ; RV32I-LABEL: cmpxchg_i32_acq_rel_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: addi a3, zero, 4 @@ -1860,6 +2062,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i32_acq_rel_monotonic: @@ -1871,12 +2074,15 @@ ; RV32IA-NEXT: sc.w.rl a4, a2, (a0) ; RV32IA-NEXT: bnez a4, .LBB25_1 ; RV32IA-NEXT: .LBB25_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i32_acq_rel_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 ; RV64I-NEXT: addi a3, zero, 4 @@ -1884,6 +2090,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i32_acq_rel_monotonic: @@ -1895,6 +2102,7 @@ ; RV64IA-NEXT: sc.w.rl a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB25_1 ; RV64IA-NEXT: .LBB25_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel monotonic ret void @@ -1904,7 +2112,9 @@ ; RV32I-LABEL: cmpxchg_i32_acq_rel_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: addi a3, zero, 4 @@ -1912,6 +2122,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: 
cmpxchg_i32_acq_rel_acquire: @@ -1923,12 +2134,15 @@ ; RV32IA-NEXT: sc.w.rl a4, a2, (a0) ; RV32IA-NEXT: bnez a4, .LBB26_1 ; RV32IA-NEXT: .LBB26_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i32_acq_rel_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 ; RV64I-NEXT: addi a3, zero, 4 @@ -1936,6 +2150,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i32_acq_rel_acquire: @@ -1947,6 +2162,7 @@ ; RV64IA-NEXT: sc.w.rl a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB26_1 ; RV64IA-NEXT: .LBB26_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel acquire ret void @@ -1956,7 +2172,9 @@ ; RV32I-LABEL: cmpxchg_i32_seq_cst_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: addi a3, zero, 5 @@ -1964,6 +2182,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i32_seq_cst_monotonic: @@ -1975,12 +2194,15 @@ ; RV32IA-NEXT: sc.w.aqrl a4, a2, (a0) ; RV32IA-NEXT: bnez a4, .LBB27_1 ; RV32IA-NEXT: .LBB27_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i32_seq_cst_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 ; RV64I-NEXT: addi a3, zero, 5 @@ -1988,6 +2210,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i32_seq_cst_monotonic: @@ -1999,6 +2222,7 @@ ; RV64IA-NEXT: sc.w.aqrl a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB27_1 ; RV64IA-NEXT: .LBB27_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst monotonic ret void @@ -2008,7 +2232,9 @@ ; RV32I-LABEL: cmpxchg_i32_seq_cst_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: addi a3, zero, 5 @@ -2016,6 +2242,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i32_seq_cst_acquire: @@ -2027,12 +2254,15 @@ ; RV32IA-NEXT: sc.w.aqrl a4, a2, (a0) ; RV32IA-NEXT: bnez a4, .LBB28_1 ; RV32IA-NEXT: .LBB28_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i32_seq_cst_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 ; RV64I-NEXT: addi a3, zero, 5 @@ -2040,6 +2270,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; 
RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i32_seq_cst_acquire: @@ -2051,6 +2282,7 @@ ; RV64IA-NEXT: sc.w.aqrl a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB28_1 ; RV64IA-NEXT: .LBB28_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst acquire ret void @@ -2060,7 +2292,9 @@ ; RV32I-LABEL: cmpxchg_i32_seq_cst_seq_cst: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: addi a1, sp, 8 ; RV32I-NEXT: addi a3, zero, 5 @@ -2068,6 +2302,7 @@ ; RV32I-NEXT: call __atomic_compare_exchange_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i32_seq_cst_seq_cst: @@ -2079,12 +2314,15 @@ ; RV32IA-NEXT: sc.w.aqrl a4, a2, (a0) ; RV32IA-NEXT: bnez a4, .LBB29_1 ; RV32IA-NEXT: .LBB29_3: +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i32_seq_cst_seq_cst: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sw a1, 4(sp) ; RV64I-NEXT: addi a1, sp, 4 ; RV64I-NEXT: addi a3, zero, 5 @@ -2092,6 +2330,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i32_seq_cst_seq_cst: @@ -2103,6 +2342,7 @@ ; RV64IA-NEXT: sc.w.aqrl a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB29_1 ; RV64IA-NEXT: .LBB29_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst seq_cst ret void @@ -2112,7 +2352,9 @@ ; RV32I-LABEL: cmpxchg_i64_monotonic_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp @@ -2123,12 +2365,15 @@ ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i64_monotonic_monotonic: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp @@ -2139,12 +2384,15 @@ ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i64_monotonic_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: mv a3, zero @@ -2152,6 +2400,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i64_monotonic_monotonic: @@ -2163,6 +2412,7 @@ ; RV64IA-NEXT: sc.d a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB30_1 ; RV64IA-NEXT: .LBB30_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val monotonic monotonic ret void @@ -2172,23 +2422,28 @@ ; RV32I-LABEL: 
cmpxchg_i64_acquire_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp ; RV32I-NEXT: mv a2, a3 ; RV32I-NEXT: mv a3, a4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: mv a5, zero ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i64_acquire_monotonic: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp @@ -2199,12 +2454,15 @@ ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i64_acquire_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: addi a3, zero, 2 @@ -2212,6 +2470,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i64_acquire_monotonic: @@ -2223,6 +2482,7 @@ ; RV64IA-NEXT: sc.d a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB31_1 ; RV64IA-NEXT: .LBB31_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire monotonic ret void @@ -2232,7 +2492,9 @@ ; RV32I-LABEL: cmpxchg_i64_acquire_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp @@ -2243,12 +2505,15 @@ ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i64_acquire_acquire: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp @@ -2259,12 +2524,15 @@ ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i64_acquire_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: addi a3, zero, 2 @@ -2272,6 +2540,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i64_acquire_acquire: @@ -2283,6 +2552,7 @@ ; RV64IA-NEXT: sc.d a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB32_1 ; RV64IA-NEXT: .LBB32_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire acquire ret void @@ -2292,7 +2562,9 @@ ; RV32I-LABEL: cmpxchg_i64_release_monotonic: ; RV32I: # 
%bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp @@ -2303,12 +2575,15 @@ ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i64_release_monotonic: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp @@ -2319,12 +2594,15 @@ ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i64_release_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: addi a3, zero, 3 @@ -2332,6 +2610,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i64_release_monotonic: @@ -2343,6 +2622,7 @@ ; RV64IA-NEXT: sc.d.rl a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB33_1 ; RV64IA-NEXT: .LBB33_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release monotonic ret void @@ -2352,7 +2632,9 @@ ; RV32I-LABEL: cmpxchg_i64_release_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp @@ -2363,12 +2645,15 @@ ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i64_release_acquire: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp @@ -2379,12 +2664,15 @@ ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i64_release_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: addi a3, zero, 3 @@ -2392,6 +2680,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i64_release_acquire: @@ -2403,6 +2692,7 @@ ; RV64IA-NEXT: sc.d.rl a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB34_1 ; RV64IA-NEXT: .LBB34_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release acquire ret void @@ -2412,7 +2702,9 @@ ; RV32I-LABEL: cmpxchg_i64_acq_rel_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw 
a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp @@ -2423,12 +2715,15 @@ ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i64_acq_rel_monotonic: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp @@ -2439,12 +2734,15 @@ ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i64_acq_rel_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: addi a3, zero, 4 @@ -2452,6 +2750,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i64_acq_rel_monotonic: @@ -2463,6 +2762,7 @@ ; RV64IA-NEXT: sc.d.rl a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB35_1 ; RV64IA-NEXT: .LBB35_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel monotonic ret void @@ -2472,7 +2772,9 @@ ; RV32I-LABEL: cmpxchg_i64_acq_rel_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp @@ -2483,12 +2785,15 @@ ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i64_acq_rel_acquire: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp @@ -2499,12 +2804,15 @@ ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i64_acq_rel_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: addi a3, zero, 4 @@ -2512,6 +2820,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i64_acq_rel_acquire: @@ -2523,6 +2832,7 @@ ; RV64IA-NEXT: sc.d.rl a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB36_1 ; RV64IA-NEXT: .LBB36_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel acquire ret void @@ -2532,7 +2842,9 @@ ; RV32I-LABEL: cmpxchg_i64_seq_cst_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp @@ -2543,12 +2855,15 @@ ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 
12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i64_seq_cst_monotonic: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp @@ -2559,12 +2874,15 @@ ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i64_seq_cst_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: addi a3, zero, 5 @@ -2572,6 +2890,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i64_seq_cst_monotonic: @@ -2583,6 +2902,7 @@ ; RV64IA-NEXT: sc.d.aqrl a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB37_1 ; RV64IA-NEXT: .LBB37_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst monotonic ret void @@ -2592,7 +2912,9 @@ ; RV32I-LABEL: cmpxchg_i64_seq_cst_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp @@ -2603,12 +2925,15 @@ ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i64_seq_cst_acquire: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp @@ -2619,12 +2944,15 @@ ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i64_seq_cst_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: addi a3, zero, 5 @@ -2632,6 +2960,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i64_seq_cst_acquire: @@ -2643,6 +2972,7 @@ ; RV64IA-NEXT: sc.d.aqrl a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB38_1 ; RV64IA-NEXT: .LBB38_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst acquire ret void @@ -2652,7 +2982,9 @@ ; RV32I-LABEL: cmpxchg_i64_seq_cst_seq_cst: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp @@ -2663,12 +2995,15 @@ ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: cmpxchg_i64_seq_cst_seq_cst: ; RV32IA: # 
%bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp @@ -2679,12 +3014,15 @@ ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i64_seq_cst_seq_cst: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: sd a1, 0(sp) ; RV64I-NEXT: mv a1, sp ; RV64I-NEXT: addi a3, zero, 5 @@ -2692,6 +3030,7 @@ ; RV64I-NEXT: call __atomic_compare_exchange_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: cmpxchg_i64_seq_cst_seq_cst: @@ -2703,6 +3042,7 @@ ; RV64IA-NEXT: sc.d.aqrl a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB39_1 ; RV64IA-NEXT: .LBB39_3: +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst seq_cst ret void Index: test/CodeGen/RISCV/atomic-rmw.ll =================================================================== --- test/CodeGen/RISCV/atomic-rmw.ll +++ test/CodeGen/RISCV/atomic-rmw.ll @@ -12,11 +12,14 @@ ; RV32I-LABEL: atomicrmw_xchg_i8_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: mv a2, zero ; RV32I-NEXT: call __atomic_exchange_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i8_monotonic: @@ -38,16 +41,20 @@ ; RV32IA-NEXT: bnez a5, .LBB0_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i8_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: mv a2, zero ; RV64I-NEXT: call __atomic_exchange_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i8_monotonic: @@ -69,6 +76,7 @@ ; RV64IA-NEXT: bnez a5, .LBB0_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b monotonic ret i8 %1 @@ -78,11 +86,14 @@ ; RV32I-LABEL: atomicrmw_xchg_i8_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 2 ; RV32I-NEXT: call __atomic_exchange_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i8_acquire: @@ -104,16 +115,20 @@ ; RV32IA-NEXT: bnez a5, .LBB1_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i8_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 2 ; RV64I-NEXT: call __atomic_exchange_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: 
ret ; ; RV64IA-LABEL: atomicrmw_xchg_i8_acquire: @@ -135,6 +150,7 @@ ; RV64IA-NEXT: bnez a5, .LBB1_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b acquire ret i8 %1 @@ -144,11 +160,14 @@ ; RV32I-LABEL: atomicrmw_xchg_i8_release: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 3 ; RV32I-NEXT: call __atomic_exchange_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i8_release: @@ -170,16 +189,20 @@ ; RV32IA-NEXT: bnez a5, .LBB2_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i8_release: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 3 ; RV64I-NEXT: call __atomic_exchange_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i8_release: @@ -201,6 +224,7 @@ ; RV64IA-NEXT: bnez a5, .LBB2_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b release ret i8 %1 @@ -210,11 +234,14 @@ ; RV32I-LABEL: atomicrmw_xchg_i8_acq_rel: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 4 ; RV32I-NEXT: call __atomic_exchange_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i8_acq_rel: @@ -236,16 +263,20 @@ ; RV32IA-NEXT: bnez a5, .LBB3_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i8_acq_rel: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 4 ; RV64I-NEXT: call __atomic_exchange_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i8_acq_rel: @@ -267,6 +298,7 @@ ; RV64IA-NEXT: bnez a5, .LBB3_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b acq_rel ret i8 %1 @@ -276,11 +308,14 @@ ; RV32I-LABEL: atomicrmw_xchg_i8_seq_cst: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 5 ; RV32I-NEXT: call __atomic_exchange_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i8_seq_cst: @@ -302,16 +337,20 @@ ; RV32IA-NEXT: bnez a5, .LBB4_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i8_seq_cst: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; 
RV64I-NEXT: addi a2, zero, 5 ; RV64I-NEXT: call __atomic_exchange_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i8_seq_cst: @@ -333,6 +372,7 @@ ; RV64IA-NEXT: bnez a5, .LBB4_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b seq_cst ret i8 %1 @@ -672,11 +712,14 @@ ; RV32I-LABEL: atomicrmw_sub_i8_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: mv a2, zero ; RV32I-NEXT: call __atomic_fetch_sub_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i8_monotonic: @@ -698,16 +741,20 @@ ; RV32IA-NEXT: bnez a5, .LBB10_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i8_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: mv a2, zero ; RV64I-NEXT: call __atomic_fetch_sub_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i8_monotonic: @@ -729,6 +776,7 @@ ; RV64IA-NEXT: bnez a5, .LBB10_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b monotonic ret i8 %1 @@ -738,11 +786,14 @@ ; RV32I-LABEL: atomicrmw_sub_i8_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 2 ; RV32I-NEXT: call __atomic_fetch_sub_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i8_acquire: @@ -764,16 +815,20 @@ ; RV32IA-NEXT: bnez a5, .LBB11_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i8_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 2 ; RV64I-NEXT: call __atomic_fetch_sub_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i8_acquire: @@ -795,6 +850,7 @@ ; RV64IA-NEXT: bnez a5, .LBB11_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b acquire ret i8 %1 @@ -804,11 +860,14 @@ ; RV32I-LABEL: atomicrmw_sub_i8_release: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 3 ; RV32I-NEXT: call __atomic_fetch_sub_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i8_release: @@ -830,16 +889,20 @@ ; RV32IA-NEXT: bnez a5, .LBB12_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: 
atomicrmw_sub_i8_release: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 3 ; RV64I-NEXT: call __atomic_fetch_sub_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i8_release: @@ -861,6 +924,7 @@ ; RV64IA-NEXT: bnez a5, .LBB12_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b release ret i8 %1 @@ -870,11 +934,14 @@ ; RV32I-LABEL: atomicrmw_sub_i8_acq_rel: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 4 ; RV32I-NEXT: call __atomic_fetch_sub_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i8_acq_rel: @@ -896,16 +963,20 @@ ; RV32IA-NEXT: bnez a5, .LBB13_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i8_acq_rel: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 4 ; RV64I-NEXT: call __atomic_fetch_sub_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i8_acq_rel: @@ -927,6 +998,7 @@ ; RV64IA-NEXT: bnez a5, .LBB13_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b acq_rel ret i8 %1 @@ -936,11 +1008,14 @@ ; RV32I-LABEL: atomicrmw_sub_i8_seq_cst: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 5 ; RV32I-NEXT: call __atomic_fetch_sub_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i8_seq_cst: @@ -962,16 +1037,20 @@ ; RV32IA-NEXT: bnez a5, .LBB14_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i8_seq_cst: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 5 ; RV64I-NEXT: call __atomic_fetch_sub_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i8_seq_cst: @@ -993,6 +1072,7 @@ ; RV64IA-NEXT: bnez a5, .LBB14_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b seq_cst ret i8 %1 @@ -1272,11 +1352,14 @@ ; RV32I-LABEL: atomicrmw_nand_i8_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: mv a2, zero ; RV32I-NEXT: call __atomic_fetch_nand_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i8_monotonic: @@ 
-1299,16 +1382,20 @@ ; RV32IA-NEXT: bnez a5, .LBB20_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i8_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: mv a2, zero ; RV64I-NEXT: call __atomic_fetch_nand_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i8_monotonic: @@ -1331,6 +1418,7 @@ ; RV64IA-NEXT: bnez a5, .LBB20_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b monotonic ret i8 %1 @@ -1340,11 +1428,14 @@ ; RV32I-LABEL: atomicrmw_nand_i8_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 2 ; RV32I-NEXT: call __atomic_fetch_nand_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i8_acquire: @@ -1367,16 +1458,20 @@ ; RV32IA-NEXT: bnez a5, .LBB21_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i8_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 2 ; RV64I-NEXT: call __atomic_fetch_nand_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i8_acquire: @@ -1399,6 +1494,7 @@ ; RV64IA-NEXT: bnez a5, .LBB21_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b acquire ret i8 %1 @@ -1408,11 +1504,14 @@ ; RV32I-LABEL: atomicrmw_nand_i8_release: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 3 ; RV32I-NEXT: call __atomic_fetch_nand_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i8_release: @@ -1435,16 +1534,20 @@ ; RV32IA-NEXT: bnez a5, .LBB22_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i8_release: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 3 ; RV64I-NEXT: call __atomic_fetch_nand_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i8_release: @@ -1467,6 +1570,7 @@ ; RV64IA-NEXT: bnez a5, .LBB22_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b release ret i8 %1 @@ -1476,11 +1580,14 @@ ; RV32I-LABEL: atomicrmw_nand_i8_acq_rel: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, 
zero, 4 ; RV32I-NEXT: call __atomic_fetch_nand_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i8_acq_rel: @@ -1503,16 +1610,20 @@ ; RV32IA-NEXT: bnez a5, .LBB23_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i8_acq_rel: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 4 ; RV64I-NEXT: call __atomic_fetch_nand_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i8_acq_rel: @@ -1535,6 +1646,7 @@ ; RV64IA-NEXT: bnez a5, .LBB23_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b acq_rel ret i8 %1 @@ -1544,11 +1656,14 @@ ; RV32I-LABEL: atomicrmw_nand_i8_seq_cst: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 5 ; RV32I-NEXT: call __atomic_fetch_nand_1 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i8_seq_cst: @@ -1571,16 +1686,20 @@ ; RV32IA-NEXT: bnez a5, .LBB24_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i8_seq_cst: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 5 ; RV64I-NEXT: call __atomic_fetch_nand_1 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i8_seq_cst: @@ -1603,6 +1722,7 @@ ; RV64IA-NEXT: bnez a5, .LBB24_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b seq_cst ret i8 %1 @@ -4932,11 +5052,14 @@ ; RV32I-LABEL: atomicrmw_xchg_i16_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: mv a2, zero ; RV32I-NEXT: call __atomic_exchange_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i16_monotonic: @@ -4959,16 +5082,20 @@ ; RV32IA-NEXT: bnez a5, .LBB55_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a3 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i16_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: mv a2, zero ; RV64I-NEXT: call __atomic_exchange_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i16_monotonic: @@ -4991,6 +5118,7 @@ ; RV64IA-NEXT: bnez a5, .LBB55_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b monotonic ret i16 %1 @@ -5000,11 +5128,14 @@ ; 
RV32I-LABEL: atomicrmw_xchg_i16_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 2 ; RV32I-NEXT: call __atomic_exchange_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i16_acquire: @@ -5027,16 +5158,20 @@ ; RV32IA-NEXT: bnez a5, .LBB56_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a3 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i16_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 2 ; RV64I-NEXT: call __atomic_exchange_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i16_acquire: @@ -5059,6 +5194,7 @@ ; RV64IA-NEXT: bnez a5, .LBB56_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b acquire ret i16 %1 @@ -5068,11 +5204,14 @@ ; RV32I-LABEL: atomicrmw_xchg_i16_release: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 3 ; RV32I-NEXT: call __atomic_exchange_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i16_release: @@ -5095,16 +5234,20 @@ ; RV32IA-NEXT: bnez a5, .LBB57_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a3 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i16_release: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 3 ; RV64I-NEXT: call __atomic_exchange_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i16_release: @@ -5127,6 +5270,7 @@ ; RV64IA-NEXT: bnez a5, .LBB57_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b release ret i16 %1 @@ -5136,11 +5280,14 @@ ; RV32I-LABEL: atomicrmw_xchg_i16_acq_rel: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 4 ; RV32I-NEXT: call __atomic_exchange_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i16_acq_rel: @@ -5163,16 +5310,20 @@ ; RV32IA-NEXT: bnez a5, .LBB58_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a3 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i16_acq_rel: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 4 ; RV64I-NEXT: call __atomic_exchange_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i16_acq_rel: @@ -5195,6 +5346,7 @@ ; 
RV64IA-NEXT: bnez a5, .LBB58_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b acq_rel ret i16 %1 @@ -5204,11 +5356,14 @@ ; RV32I-LABEL: atomicrmw_xchg_i16_seq_cst: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 5 ; RV32I-NEXT: call __atomic_exchange_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i16_seq_cst: @@ -5231,16 +5386,20 @@ ; RV32IA-NEXT: bnez a5, .LBB59_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a3 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i16_seq_cst: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 5 ; RV64I-NEXT: call __atomic_exchange_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i16_seq_cst: @@ -5263,6 +5422,7 @@ ; RV64IA-NEXT: bnez a5, .LBB59_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b seq_cst ret i16 %1 @@ -5612,11 +5772,14 @@ ; RV32I-LABEL: atomicrmw_sub_i16_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: mv a2, zero ; RV32I-NEXT: call __atomic_fetch_sub_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i16_monotonic: @@ -5639,16 +5802,20 @@ ; RV32IA-NEXT: bnez a5, .LBB65_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a3 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i16_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: mv a2, zero ; RV64I-NEXT: call __atomic_fetch_sub_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i16_monotonic: @@ -5671,6 +5838,7 @@ ; RV64IA-NEXT: bnez a5, .LBB65_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b monotonic ret i16 %1 @@ -5680,11 +5848,14 @@ ; RV32I-LABEL: atomicrmw_sub_i16_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 2 ; RV32I-NEXT: call __atomic_fetch_sub_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i16_acquire: @@ -5707,16 +5878,20 @@ ; RV32IA-NEXT: bnez a5, .LBB66_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a3 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i16_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 2 ; 
RV64I-NEXT: call __atomic_fetch_sub_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i16_acquire: @@ -5739,6 +5914,7 @@ ; RV64IA-NEXT: bnez a5, .LBB66_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b acquire ret i16 %1 @@ -5748,11 +5924,14 @@ ; RV32I-LABEL: atomicrmw_sub_i16_release: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 3 ; RV32I-NEXT: call __atomic_fetch_sub_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i16_release: @@ -5775,16 +5954,20 @@ ; RV32IA-NEXT: bnez a5, .LBB67_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a3 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i16_release: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 3 ; RV64I-NEXT: call __atomic_fetch_sub_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i16_release: @@ -5807,6 +5990,7 @@ ; RV64IA-NEXT: bnez a5, .LBB67_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b release ret i16 %1 @@ -5816,11 +6000,14 @@ ; RV32I-LABEL: atomicrmw_sub_i16_acq_rel: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 4 ; RV32I-NEXT: call __atomic_fetch_sub_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i16_acq_rel: @@ -5843,16 +6030,20 @@ ; RV32IA-NEXT: bnez a5, .LBB68_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a3 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i16_acq_rel: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 4 ; RV64I-NEXT: call __atomic_fetch_sub_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i16_acq_rel: @@ -5875,6 +6066,7 @@ ; RV64IA-NEXT: bnez a5, .LBB68_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b acq_rel ret i16 %1 @@ -5884,11 +6076,14 @@ ; RV32I-LABEL: atomicrmw_sub_i16_seq_cst: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 5 ; RV32I-NEXT: call __atomic_fetch_sub_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i16_seq_cst: @@ -5911,16 +6106,20 @@ ; RV32IA-NEXT: bnez a5, .LBB69_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a3 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: 
atomicrmw_sub_i16_seq_cst: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 5 ; RV64I-NEXT: call __atomic_fetch_sub_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i16_seq_cst: @@ -5943,6 +6142,7 @@ ; RV64IA-NEXT: bnez a5, .LBB69_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b seq_cst ret i16 %1 @@ -6232,11 +6432,14 @@ ; RV32I-LABEL: atomicrmw_nand_i16_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: mv a2, zero ; RV32I-NEXT: call __atomic_fetch_nand_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i16_monotonic: @@ -6260,16 +6463,20 @@ ; RV32IA-NEXT: bnez a5, .LBB75_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a3 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i16_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: mv a2, zero ; RV64I-NEXT: call __atomic_fetch_nand_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i16_monotonic: @@ -6293,6 +6500,7 @@ ; RV64IA-NEXT: bnez a5, .LBB75_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b monotonic ret i16 %1 @@ -6302,11 +6510,14 @@ ; RV32I-LABEL: atomicrmw_nand_i16_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 2 ; RV32I-NEXT: call __atomic_fetch_nand_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i16_acquire: @@ -6330,16 +6541,20 @@ ; RV32IA-NEXT: bnez a5, .LBB76_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a3 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i16_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 2 ; RV64I-NEXT: call __atomic_fetch_nand_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i16_acquire: @@ -6363,6 +6578,7 @@ ; RV64IA-NEXT: bnez a5, .LBB76_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b acquire ret i16 %1 @@ -6372,11 +6588,14 @@ ; RV32I-LABEL: atomicrmw_nand_i16_release: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 3 ; RV32I-NEXT: call __atomic_fetch_nand_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; 
RV32IA-LABEL: atomicrmw_nand_i16_release: @@ -6400,16 +6619,20 @@ ; RV32IA-NEXT: bnez a5, .LBB77_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a3 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i16_release: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 3 ; RV64I-NEXT: call __atomic_fetch_nand_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i16_release: @@ -6433,6 +6656,7 @@ ; RV64IA-NEXT: bnez a5, .LBB77_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b release ret i16 %1 @@ -6442,11 +6666,14 @@ ; RV32I-LABEL: atomicrmw_nand_i16_acq_rel: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 4 ; RV32I-NEXT: call __atomic_fetch_nand_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i16_acq_rel: @@ -6470,16 +6697,20 @@ ; RV32IA-NEXT: bnez a5, .LBB78_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a3 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i16_acq_rel: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 4 ; RV64I-NEXT: call __atomic_fetch_nand_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i16_acq_rel: @@ -6503,6 +6734,7 @@ ; RV64IA-NEXT: bnez a5, .LBB78_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b acq_rel ret i16 %1 @@ -6512,11 +6744,14 @@ ; RV32I-LABEL: atomicrmw_nand_i16_seq_cst: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 5 ; RV32I-NEXT: call __atomic_fetch_nand_2 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i16_seq_cst: @@ -6540,16 +6775,20 @@ ; RV32IA-NEXT: bnez a5, .LBB79_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: srl a0, a4, a3 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i16_seq_cst: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 5 ; RV64I-NEXT: call __atomic_fetch_nand_2 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i16_seq_cst: @@ -6573,6 +6812,7 @@ ; RV64IA-NEXT: bnez a5, .LBB79_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: srlw a0, a4, a3 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b seq_cst ret i16 %1 @@ -10062,31 +10302,39 @@ ; RV32I-LABEL: atomicrmw_xchg_i32_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 
12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: mv a2, zero ; RV32I-NEXT: call __atomic_exchange_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i32_monotonic: ; RV32IA: # %bb.0: ; RV32IA-NEXT: amoswap.w a0, a1, (a0) +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i32_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: mv a2, zero ; RV64I-NEXT: call __atomic_exchange_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i32_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: amoswap.w a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i32* %a, i32 %b monotonic ret i32 %1 @@ -10096,31 +10344,39 @@ ; RV32I-LABEL: atomicrmw_xchg_i32_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 2 ; RV32I-NEXT: call __atomic_exchange_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i32_acquire: ; RV32IA: # %bb.0: ; RV32IA-NEXT: amoswap.w.aq a0, a1, (a0) +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i32_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 2 ; RV64I-NEXT: call __atomic_exchange_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i32_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: amoswap.w.aq a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i32* %a, i32 %b acquire ret i32 %1 @@ -10130,31 +10386,39 @@ ; RV32I-LABEL: atomicrmw_xchg_i32_release: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 3 ; RV32I-NEXT: call __atomic_exchange_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i32_release: ; RV32IA: # %bb.0: ; RV32IA-NEXT: amoswap.w.rl a0, a1, (a0) +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i32_release: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 3 ; RV64I-NEXT: call __atomic_exchange_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i32_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: amoswap.w.rl a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i32* %a, i32 %b release ret i32 %1 @@ -10164,31 +10428,39 @@ ; RV32I-LABEL: atomicrmw_xchg_i32_acq_rel: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 4 ; RV32I-NEXT: call 
__atomic_exchange_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i32_acq_rel: ; RV32IA: # %bb.0: ; RV32IA-NEXT: amoswap.w.aqrl a0, a1, (a0) +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i32_acq_rel: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 4 ; RV64I-NEXT: call __atomic_exchange_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i32_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: amoswap.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i32* %a, i32 %b acq_rel ret i32 %1 @@ -10198,31 +10470,39 @@ ; RV32I-LABEL: atomicrmw_xchg_i32_seq_cst: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 5 ; RV32I-NEXT: call __atomic_exchange_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i32_seq_cst: ; RV32IA: # %bb.0: ; RV32IA-NEXT: amoswap.w.aqrl a0, a1, (a0) +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i32_seq_cst: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 5 ; RV64I-NEXT: call __atomic_exchange_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i32_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: amoswap.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i32* %a, i32 %b seq_cst ret i32 %1 @@ -10402,33 +10682,41 @@ ; RV32I-LABEL: atomicrmw_sub_i32_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: mv a2, zero ; RV32I-NEXT: call __atomic_fetch_sub_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i32_monotonic: ; RV32IA: # %bb.0: ; RV32IA-NEXT: neg a1, a1 ; RV32IA-NEXT: amoadd.w a0, a1, (a0) +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i32_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: mv a2, zero ; RV64I-NEXT: call __atomic_fetch_sub_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i32_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: neg a1, a1 ; RV64IA-NEXT: amoadd.w a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i32* %a, i32 %b monotonic ret i32 %1 @@ -10438,33 +10726,41 @@ ; RV32I-LABEL: atomicrmw_sub_i32_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 2 ; RV32I-NEXT: call __atomic_fetch_sub_4 ; RV32I-NEXT: 
lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i32_acquire: ; RV32IA: # %bb.0: ; RV32IA-NEXT: neg a1, a1 ; RV32IA-NEXT: amoadd.w.aq a0, a1, (a0) +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i32_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 2 ; RV64I-NEXT: call __atomic_fetch_sub_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i32_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: neg a1, a1 ; RV64IA-NEXT: amoadd.w.aq a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i32* %a, i32 %b acquire ret i32 %1 @@ -10474,33 +10770,41 @@ ; RV32I-LABEL: atomicrmw_sub_i32_release: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 3 ; RV32I-NEXT: call __atomic_fetch_sub_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i32_release: ; RV32IA: # %bb.0: ; RV32IA-NEXT: neg a1, a1 ; RV32IA-NEXT: amoadd.w.rl a0, a1, (a0) +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i32_release: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 3 ; RV64I-NEXT: call __atomic_fetch_sub_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i32_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: neg a1, a1 ; RV64IA-NEXT: amoadd.w.rl a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i32* %a, i32 %b release ret i32 %1 @@ -10510,33 +10814,41 @@ ; RV32I-LABEL: atomicrmw_sub_i32_acq_rel: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 4 ; RV32I-NEXT: call __atomic_fetch_sub_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i32_acq_rel: ; RV32IA: # %bb.0: ; RV32IA-NEXT: neg a1, a1 ; RV32IA-NEXT: amoadd.w.aqrl a0, a1, (a0) +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i32_acq_rel: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 4 ; RV64I-NEXT: call __atomic_fetch_sub_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i32_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: neg a1, a1 ; RV64IA-NEXT: amoadd.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i32* %a, i32 %b acq_rel ret i32 %1 @@ -10546,33 +10858,41 @@ ; RV32I-LABEL: atomicrmw_sub_i32_seq_cst: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, 
zero, 5 ; RV32I-NEXT: call __atomic_fetch_sub_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i32_seq_cst: ; RV32IA: # %bb.0: ; RV32IA-NEXT: neg a1, a1 ; RV32IA-NEXT: amoadd.w.aqrl a0, a1, (a0) +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i32_seq_cst: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 5 ; RV64I-NEXT: call __atomic_fetch_sub_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i32_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: neg a1, a1 ; RV64IA-NEXT: amoadd.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i32* %a, i32 %b seq_cst ret i32 %1 @@ -10752,11 +11072,14 @@ ; RV32I-LABEL: atomicrmw_nand_i32_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: mv a2, zero ; RV32I-NEXT: call __atomic_fetch_nand_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i32_monotonic: @@ -10769,16 +11092,20 @@ ; RV32IA-NEXT: bnez a3, .LBB130_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: mv a0, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i32_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: mv a2, zero ; RV64I-NEXT: call __atomic_fetch_nand_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i32_monotonic: @@ -10791,6 +11118,7 @@ ; RV64IA-NEXT: bnez a3, .LBB130_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i32* %a, i32 %b monotonic ret i32 %1 @@ -10800,11 +11128,14 @@ ; RV32I-LABEL: atomicrmw_nand_i32_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 2 ; RV32I-NEXT: call __atomic_fetch_nand_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i32_acquire: @@ -10817,16 +11148,20 @@ ; RV32IA-NEXT: bnez a3, .LBB131_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: mv a0, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i32_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 2 ; RV64I-NEXT: call __atomic_fetch_nand_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i32_acquire: @@ -10839,6 +11174,7 @@ ; RV64IA-NEXT: bnez a3, .LBB131_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i32* %a, i32 %b acquire ret i32 %1 @@ -10848,11 +11184,14 @@ ; RV32I-LABEL: atomicrmw_nand_i32_release: ; 
RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 3 ; RV32I-NEXT: call __atomic_fetch_nand_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i32_release: @@ -10865,16 +11204,20 @@ ; RV32IA-NEXT: bnez a3, .LBB132_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: mv a0, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i32_release: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 3 ; RV64I-NEXT: call __atomic_fetch_nand_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i32_release: @@ -10887,6 +11230,7 @@ ; RV64IA-NEXT: bnez a3, .LBB132_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i32* %a, i32 %b release ret i32 %1 @@ -10896,11 +11240,14 @@ ; RV32I-LABEL: atomicrmw_nand_i32_acq_rel: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 4 ; RV32I-NEXT: call __atomic_fetch_nand_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i32_acq_rel: @@ -10913,16 +11260,20 @@ ; RV32IA-NEXT: bnez a3, .LBB133_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: mv a0, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i32_acq_rel: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 4 ; RV64I-NEXT: call __atomic_fetch_nand_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i32_acq_rel: @@ -10935,6 +11286,7 @@ ; RV64IA-NEXT: bnez a3, .LBB133_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i32* %a, i32 %b acq_rel ret i32 %1 @@ -10944,11 +11296,14 @@ ; RV32I-LABEL: atomicrmw_nand_i32_seq_cst: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a2, zero, 5 ; RV32I-NEXT: call __atomic_fetch_nand_4 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i32_seq_cst: @@ -10961,16 +11316,20 @@ ; RV32IA-NEXT: bnez a3, .LBB134_1 ; RV32IA-NEXT: # %bb.2: ; RV32IA-NEXT: mv a0, a2 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i32_seq_cst: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 5 ; RV64I-NEXT: call __atomic_fetch_nand_4 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i32_seq_cst: @@ -10983,6 +11342,7 @@ ; RV64IA-NEXT: bnez a3, .LBB134_1 ; RV64IA-NEXT: 
# %bb.2: ; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i32* %a, i32 %b seq_cst ret i32 %1 @@ -13152,36 +13512,46 @@ ; RV32I-LABEL: atomicrmw_xchg_i64_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: mv a3, zero ; RV32I-NEXT: call __atomic_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i64_monotonic: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: mv a3, zero ; RV32IA-NEXT: call __atomic_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i64_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: mv a2, zero ; RV64I-NEXT: call __atomic_exchange_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i64_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: amoswap.d a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i64* %a, i64 %b monotonic ret i64 %1 @@ -13191,36 +13561,46 @@ ; RV32I-LABEL: atomicrmw_xchg_i64_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a3, zero, 2 ; RV32I-NEXT: call __atomic_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i64_acquire: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: addi a3, zero, 2 ; RV32IA-NEXT: call __atomic_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i64_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 2 ; RV64I-NEXT: call __atomic_exchange_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i64_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: amoswap.d.aq a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i64* %a, i64 %b acquire ret i64 %1 @@ -13230,36 +13610,46 @@ ; RV32I-LABEL: atomicrmw_xchg_i64_release: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a3, zero, 3 ; RV32I-NEXT: call __atomic_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i64_release: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: addi a3, zero, 3 ; RV32IA-NEXT: 
call __atomic_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i64_release: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 3 ; RV64I-NEXT: call __atomic_exchange_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i64_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: amoswap.d.rl a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i64* %a, i64 %b release ret i64 %1 @@ -13269,36 +13659,46 @@ ; RV32I-LABEL: atomicrmw_xchg_i64_acq_rel: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a3, zero, 4 ; RV32I-NEXT: call __atomic_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i64_acq_rel: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: addi a3, zero, 4 ; RV32IA-NEXT: call __atomic_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i64_acq_rel: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 4 ; RV64I-NEXT: call __atomic_exchange_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i64_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: amoswap.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i64* %a, i64 %b acq_rel ret i64 %1 @@ -13308,36 +13708,46 @@ ; RV32I-LABEL: atomicrmw_xchg_i64_seq_cst: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a3, zero, 5 ; RV32I-NEXT: call __atomic_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_xchg_i64_seq_cst: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: addi a3, zero, 5 ; RV32IA-NEXT: call __atomic_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i64_seq_cst: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 5 ; RV64I-NEXT: call __atomic_exchange_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_xchg_i64_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: amoswap.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i64* %a, i64 %b seq_cst ret i64 %1 @@ -13542,37 +13952,47 @@ ; RV32I-LABEL: 
atomicrmw_sub_i64_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: mv a3, zero ; RV32I-NEXT: call __atomic_fetch_sub_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i64_monotonic: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: mv a3, zero ; RV32IA-NEXT: call __atomic_fetch_sub_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i64_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: mv a2, zero ; RV64I-NEXT: call __atomic_fetch_sub_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i64_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: neg a1, a1 ; RV64IA-NEXT: amoadd.d a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i64* %a, i64 %b monotonic ret i64 %1 @@ -13582,37 +14002,47 @@ ; RV32I-LABEL: atomicrmw_sub_i64_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a3, zero, 2 ; RV32I-NEXT: call __atomic_fetch_sub_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i64_acquire: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: addi a3, zero, 2 ; RV32IA-NEXT: call __atomic_fetch_sub_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i64_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 2 ; RV64I-NEXT: call __atomic_fetch_sub_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i64_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: neg a1, a1 ; RV64IA-NEXT: amoadd.d.aq a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i64* %a, i64 %b acquire ret i64 %1 @@ -13622,37 +14052,47 @@ ; RV32I-LABEL: atomicrmw_sub_i64_release: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a3, zero, 3 ; RV32I-NEXT: call __atomic_fetch_sub_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i64_release: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: addi a3, zero, 3 ; RV32IA-NEXT: call __atomic_fetch_sub_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; 
; RV64I-LABEL: atomicrmw_sub_i64_release: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 3 ; RV64I-NEXT: call __atomic_fetch_sub_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i64_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: neg a1, a1 ; RV64IA-NEXT: amoadd.d.rl a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i64* %a, i64 %b release ret i64 %1 @@ -13662,37 +14102,47 @@ ; RV32I-LABEL: atomicrmw_sub_i64_acq_rel: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a3, zero, 4 ; RV32I-NEXT: call __atomic_fetch_sub_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i64_acq_rel: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: addi a3, zero, 4 ; RV32IA-NEXT: call __atomic_fetch_sub_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i64_acq_rel: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 4 ; RV64I-NEXT: call __atomic_fetch_sub_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i64_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: neg a1, a1 ; RV64IA-NEXT: amoadd.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i64* %a, i64 %b acq_rel ret i64 %1 @@ -13702,37 +14152,47 @@ ; RV32I-LABEL: atomicrmw_sub_i64_seq_cst: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a3, zero, 5 ; RV32I-NEXT: call __atomic_fetch_sub_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_sub_i64_seq_cst: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: addi a3, zero, 5 ; RV32IA-NEXT: call __atomic_fetch_sub_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i64_seq_cst: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 5 ; RV64I-NEXT: call __atomic_fetch_sub_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_sub_i64_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: neg a1, a1 ; RV64IA-NEXT: amoadd.d.aqrl a0, a1, (a0) +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw sub i64* %a, i64 %b seq_cst ret i64 %1 @@ -13937,31 +14397,40 @@ ; RV32I-LABEL: atomicrmw_nand_i64_monotonic: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; 
RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: mv a3, zero ; RV32I-NEXT: call __atomic_fetch_nand_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i64_monotonic: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: mv a3, zero ; RV32IA-NEXT: call __atomic_fetch_nand_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i64_monotonic: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: mv a2, zero ; RV64I-NEXT: call __atomic_fetch_nand_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i64_monotonic: @@ -13974,6 +14443,7 @@ ; RV64IA-NEXT: bnez a3, .LBB185_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i64* %a, i64 %b monotonic ret i64 %1 @@ -13983,31 +14453,40 @@ ; RV32I-LABEL: atomicrmw_nand_i64_acquire: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a3, zero, 2 ; RV32I-NEXT: call __atomic_fetch_nand_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i64_acquire: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: addi a3, zero, 2 ; RV32IA-NEXT: call __atomic_fetch_nand_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i64_acquire: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 2 ; RV64I-NEXT: call __atomic_fetch_nand_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i64_acquire: @@ -14020,6 +14499,7 @@ ; RV64IA-NEXT: bnez a3, .LBB186_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i64* %a, i64 %b acquire ret i64 %1 @@ -14029,31 +14509,40 @@ ; RV32I-LABEL: atomicrmw_nand_i64_release: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a3, zero, 3 ; RV32I-NEXT: call __atomic_fetch_nand_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i64_release: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: addi a3, zero, 3 ; RV32IA-NEXT: call __atomic_fetch_nand_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: 
atomicrmw_nand_i64_release: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 3 ; RV64I-NEXT: call __atomic_fetch_nand_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i64_release: @@ -14066,6 +14555,7 @@ ; RV64IA-NEXT: bnez a3, .LBB187_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i64* %a, i64 %b release ret i64 %1 @@ -14075,31 +14565,40 @@ ; RV32I-LABEL: atomicrmw_nand_i64_acq_rel: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a3, zero, 4 ; RV32I-NEXT: call __atomic_fetch_nand_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i64_acq_rel: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: addi a3, zero, 4 ; RV32IA-NEXT: call __atomic_fetch_nand_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i64_acq_rel: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 4 ; RV64I-NEXT: call __atomic_fetch_nand_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i64_acq_rel: @@ -14112,6 +14611,7 @@ ; RV64IA-NEXT: bnez a3, .LBB188_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i64* %a, i64 %b acq_rel ret i64 %1 @@ -14121,31 +14621,40 @@ ; RV32I-LABEL: atomicrmw_nand_i64_seq_cst: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: .cfi_def_cfa_offset 16 ; RV32I-NEXT: sw ra, 12(sp) +; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: addi a3, zero, 5 ; RV32I-NEXT: call __atomic_fetch_nand_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IA-LABEL: atomicrmw_nand_i64_seq_cst: ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 +; RV32IA-NEXT: .cfi_def_cfa_offset 16 ; RV32IA-NEXT: sw ra, 12(sp) +; RV32IA-NEXT: .cfi_offset ra, -4 ; RV32IA-NEXT: addi a3, zero, 5 ; RV32IA-NEXT: call __atomic_fetch_nand_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 +; RV32IA-NEXT: .cfi_def_cfa_offset 0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i64_seq_cst: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) +; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: addi a2, zero, 5 ; RV64I-NEXT: call __atomic_fetch_nand_8 ; RV64I-NEXT: ld ra, 8(sp) ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IA-LABEL: atomicrmw_nand_i64_seq_cst: @@ -14158,6 +14667,7 @@ ; RV64IA-NEXT: bnez a3, .LBB189_1 ; RV64IA-NEXT: # %bb.2: ; RV64IA-NEXT: mv a0, a2 +; RV64IA-NEXT: .cfi_def_cfa_offset 0 ; RV64IA-NEXT: ret %1 = atomicrmw nand i64* %a, i64 %b seq_cst ret i64 %1 Index: test/CodeGen/RISCV/bare-select.ll 
=================================================================== --- test/CodeGen/RISCV/bare-select.ll +++ test/CodeGen/RISCV/bare-select.ll @@ -11,6 +11,7 @@ ; RV32I-NEXT: mv a1, a2 ; RV32I-NEXT: .LBB0_2: ; RV32I-NEXT: mv a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret %1 = select i1 %a, i32 %b, i32 %c ret i32 %1 @@ -25,6 +26,7 @@ ; RV32I-NEXT: mv a1, a2 ; RV32I-NEXT: .LBB1_2: ; RV32I-NEXT: mv a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret %1 = select i1 %a, float %b, float %c ret float %1 Index: test/CodeGen/RISCV/branch-relaxation.ll =================================================================== --- test/CodeGen/RISCV/branch-relaxation.ll +++ test/CodeGen/RISCV/branch-relaxation.ll @@ -14,6 +14,7 @@ ; CHECK-NEXT: .space 4096 ; CHECK-NEXT: #NO_APP ; CHECK-NEXT: .LBB0_2: # %tail +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret br i1 %a, label %iftrue, label %tail @@ -40,11 +41,13 @@ ; CHECK-NEXT: .space 1048576 ; CHECK-NEXT: #NO_APP ; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB1_2: # %jmp ; CHECK-NEXT: #APP ; CHECK-NEXT: #NO_APP ; CHECK-NEXT: addi a0, zero, 1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret br i1 %a, label %iftrue, label %jmp Index: test/CodeGen/RISCV/branch.ll =================================================================== --- test/CodeGen/RISCV/branch.ll +++ test/CodeGen/RISCV/branch.ll @@ -41,6 +41,7 @@ ; RV32I-NEXT: # %bb.11: # %test12 ; RV32I-NEXT: lw a0, 0(a1) ; RV32I-NEXT: .LBB0_12: # %end +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret %val1 = load volatile i32, i32* %b %tst1 = icmp eq i32 %val1, %a Index: test/CodeGen/RISCV/callee-saved-fpr32s.ll =================================================================== --- test/CodeGen/RISCV/callee-saved-fpr32s.ll +++ test/CodeGen/RISCV/callee-saved-fpr32s.ll @@ -88,11 +88,13 @@ ; ILP32-LP64-NEXT: fsw ft2, 8(a1) ; ILP32-LP64-NEXT: fsw ft1, 4(a1) ; ILP32-LP64-NEXT: fsw ft0, %lo(var)(a0) +; ILP32-LP64-NEXT: .cfi_def_cfa_offset 0 ; ILP32-LP64-NEXT: ret ; ; ILP32F-LP64F-LABEL: callee: ; ILP32F-LP64F: # %bb.0: ; ILP32F-LP64F-NEXT: addi sp, sp, -48 +; ILP32F-LP64F-NEXT: .cfi_def_cfa_offset 48 ; ILP32F-LP64F-NEXT: fsw fs0, 44(sp) ; ILP32F-LP64F-NEXT: fsw fs1, 40(sp) ; ILP32F-LP64F-NEXT: fsw fs2, 36(sp) @@ -105,12 +107,25 @@ ; ILP32F-LP64F-NEXT: fsw fs9, 8(sp) ; ILP32F-LP64F-NEXT: fsw fs10, 4(sp) ; ILP32F-LP64F-NEXT: fsw fs11, 0(sp) +; ILP32F-LP64F-NEXT: .cfi_offset fs0, -4 +; ILP32F-LP64F-NEXT: .cfi_offset fs1, -8 +; ILP32F-LP64F-NEXT: .cfi_offset fs2, -12 +; ILP32F-LP64F-NEXT: .cfi_offset fs3, -16 +; ILP32F-LP64F-NEXT: .cfi_offset fs4, -20 +; ILP32F-LP64F-NEXT: .cfi_offset fs5, -24 +; ILP32F-LP64F-NEXT: .cfi_offset fs6, -28 +; ILP32F-LP64F-NEXT: .cfi_offset fs7, -32 +; ILP32F-LP64F-NEXT: .cfi_offset fs8, -36 +; ILP32F-LP64F-NEXT: .cfi_offset fs9, -40 +; ILP32F-LP64F-NEXT: .cfi_offset fs10, -44 +; ILP32F-LP64F-NEXT: .cfi_offset fs11, -48 ; ILP32F-LP64F-NEXT: lui a0, %hi(var) ; ILP32F-LP64F-NEXT: addi a1, a0, %lo(var) ; ; ILP32D-LP64D-LABEL: callee: ; ILP32D-LP64D: # %bb.0: ; ILP32D-LP64D-NEXT: addi sp, sp, -96 +; ILP32D-LP64D-NEXT: .cfi_def_cfa_offset 96 ; ILP32D-LP64D-NEXT: fsd fs0, 88(sp) ; ILP32D-LP64D-NEXT: fsd fs1, 80(sp) ; ILP32D-LP64D-NEXT: fsd fs2, 72(sp) @@ -123,6 +138,18 @@ ; ILP32D-LP64D-NEXT: fsd fs9, 16(sp) ; ILP32D-LP64D-NEXT: fsd fs10, 8(sp) ; ILP32D-LP64D-NEXT: fsd fs11, 0(sp) +; ILP32D-LP64D-NEXT: .cfi_offset fs0, -8 +; ILP32D-LP64D-NEXT: .cfi_offset fs1, -16 +; ILP32D-LP64D-NEXT: .cfi_offset
fs2, -24 +; ILP32D-LP64D-NEXT: .cfi_offset fs3, -32 +; ILP32D-LP64D-NEXT: .cfi_offset fs4, -40 +; ILP32D-LP64D-NEXT: .cfi_offset fs5, -48 +; ILP32D-LP64D-NEXT: .cfi_offset fs6, -56 +; ILP32D-LP64D-NEXT: .cfi_offset fs7, -64 +; ILP32D-LP64D-NEXT: .cfi_offset fs8, -72 +; ILP32D-LP64D-NEXT: .cfi_offset fs9, -80 +; ILP32D-LP64D-NEXT: .cfi_offset fs10, -88 +; ILP32D-LP64D-NEXT: .cfi_offset fs11, -96 ; ILP32D-LP64D-NEXT: lui a0, %hi(var) ; ILP32D-LP64D-NEXT: addi a1, a0, %lo(var) %val = load [32 x float], [32 x float]* @var Index: test/CodeGen/RISCV/callee-saved-fpr64s.ll =================================================================== --- test/CodeGen/RISCV/callee-saved-fpr64s.ll +++ test/CodeGen/RISCV/callee-saved-fpr64s.ll @@ -84,11 +84,13 @@ ; ILP32-LP64-NEXT: fsd ft2, 16(a1) ; ILP32-LP64-NEXT: fsd ft1, 8(a1) ; ILP32-LP64-NEXT: fsd ft0, %lo(var)(a0) +; ILP32-LP64-NEXT: .cfi_def_cfa_offset 0 ; ILP32-LP64-NEXT: ret ; ; ILP32D-LP64D-LABEL: callee: ; ILP32D-LP64D: # %bb.0: ; ILP32D-LP64D-NEXT: addi sp, sp, -96 +; ILP32D-LP64D-NEXT: .cfi_def_cfa_offset 96 ; ILP32D-LP64D-NEXT: fsd fs0, 88(sp) ; ILP32D-LP64D-NEXT: fsd fs1, 80(sp) ; ILP32D-LP64D-NEXT: fsd fs2, 72(sp) @@ -101,6 +103,18 @@ ; ILP32D-LP64D-NEXT: fsd fs9, 16(sp) ; ILP32D-LP64D-NEXT: fsd fs10, 8(sp) ; ILP32D-LP64D-NEXT: fsd fs11, 0(sp) +; ILP32D-LP64D-NEXT: .cfi_offset fs0, -8 +; ILP32D-LP64D-NEXT: .cfi_offset fs1, -16 +; ILP32D-LP64D-NEXT: .cfi_offset fs2, -24 +; ILP32D-LP64D-NEXT: .cfi_offset fs3, -32 +; ILP32D-LP64D-NEXT: .cfi_offset fs4, -40 +; ILP32D-LP64D-NEXT: .cfi_offset fs5, -48 +; ILP32D-LP64D-NEXT: .cfi_offset fs6, -56 +; ILP32D-LP64D-NEXT: .cfi_offset fs7, -64 +; ILP32D-LP64D-NEXT: .cfi_offset fs8, -72 +; ILP32D-LP64D-NEXT: .cfi_offset fs9, -80 +; ILP32D-LP64D-NEXT: .cfi_offset fs10, -88 +; ILP32D-LP64D-NEXT: .cfi_offset fs11, -96 ; ILP32D-LP64D-NEXT: lui a0, %hi(var) ; ILP32D-LP64D-NEXT: addi a1, a0, %lo(var) %val = load [32 x double], [32 x double]* @var Index: test/CodeGen/RISCV/callee-saved-gprs.ll =================================================================== --- test/CodeGen/RISCV/callee-saved-gprs.ll +++ test/CodeGen/RISCV/callee-saved-gprs.ll @@ -28,6 +28,7 @@ ; RV32I-LABEL: callee: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -80 +; RV32I-NEXT: .cfi_def_cfa_offset 80 ; RV32I-NEXT: sw s0, 76(sp) ; RV32I-NEXT: sw s1, 72(sp) ; RV32I-NEXT: sw s2, 68(sp) @@ -40,12 +41,25 @@ ; RV32I-NEXT: sw s9, 40(sp) ; RV32I-NEXT: sw s10, 36(sp) ; RV32I-NEXT: sw s11, 32(sp) +; RV32I-NEXT: .cfi_offset s0, -4 +; RV32I-NEXT: .cfi_offset s1, -8 +; RV32I-NEXT: .cfi_offset s2, -12 +; RV32I-NEXT: .cfi_offset s3, -16 +; RV32I-NEXT: .cfi_offset s4, -20 +; RV32I-NEXT: .cfi_offset s5, -24 +; RV32I-NEXT: .cfi_offset s6, -28 +; RV32I-NEXT: .cfi_offset s7, -32 +; RV32I-NEXT: .cfi_offset s8, -36 +; RV32I-NEXT: .cfi_offset s9, -40 +; RV32I-NEXT: .cfi_offset s10, -44 +; RV32I-NEXT: .cfi_offset s11, -48 ; RV32I-NEXT: lui a0, %hi(var) ; RV32I-NEXT: addi a1, a0, %lo(var) ; ; RV32I-WITH-FP-LABEL: callee: ; RV32I-WITH-FP: # %bb.0: ; RV32I-WITH-FP-NEXT: addi sp, sp, -80 +; RV32I-WITH-FP-NEXT: .cfi_def_cfa_offset 80 ; RV32I-WITH-FP-NEXT: sw ra, 76(sp) ; RV32I-WITH-FP-NEXT: sw s0, 72(sp) ; RV32I-WITH-FP-NEXT: sw s1, 68(sp) @@ -59,13 +73,28 @@ ; RV32I-WITH-FP-NEXT: sw s9, 36(sp) ; RV32I-WITH-FP-NEXT: sw s10, 32(sp) ; RV32I-WITH-FP-NEXT: sw s11, 28(sp) +; RV32I-WITH-FP-NEXT: .cfi_offset ra, -4 +; RV32I-WITH-FP-NEXT: .cfi_offset s0, -8 +; RV32I-WITH-FP-NEXT: .cfi_offset s1, -12 +; RV32I-WITH-FP-NEXT: .cfi_offset s2, -16 +; RV32I-WITH-FP-NEXT: .cfi_offset s3, -20 +;
RV32I-WITH-FP-NEXT: .cfi_offset s4, -24 +; RV32I-WITH-FP-NEXT: .cfi_offset s5, -28 +; RV32I-WITH-FP-NEXT: .cfi_offset s6, -32 +; RV32I-WITH-FP-NEXT: .cfi_offset s7, -36 +; RV32I-WITH-FP-NEXT: .cfi_offset s8, -40 +; RV32I-WITH-FP-NEXT: .cfi_offset s9, -44 +; RV32I-WITH-FP-NEXT: .cfi_offset s10, -48 +; RV32I-WITH-FP-NEXT: .cfi_offset s11, -52 ; RV32I-WITH-FP-NEXT: addi s0, sp, 80 +; RV32I-WITH-FP-NEXT: .cfi_def_cfa s0, 0 ; RV32I-WITH-FP-NEXT: lui a0, %hi(var) ; RV32I-WITH-FP-NEXT: addi a1, a0, %lo(var) ; ; RV64I-LABEL: callee: ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -144 +; RV64I-NEXT: .cfi_def_cfa_offset 144 ; RV64I-NEXT: sd s0, 136(sp) ; RV64I-NEXT: sd s1, 128(sp) ; RV64I-NEXT: sd s2, 120(sp) @@ -78,12 +107,25 @@ ; RV64I-NEXT: sd s9, 64(sp) ; RV64I-NEXT: sd s10, 56(sp) ; RV64I-NEXT: sd s11, 48(sp) +; RV64I-NEXT: .cfi_offset s0, -8 +; RV64I-NEXT: .cfi_offset s1, -16 +; RV64I-NEXT: .cfi_offset s2, -24 +; RV64I-NEXT: .cfi_offset s3, -32 +; RV64I-NEXT: .cfi_offset s4, -40 +; RV64I-NEXT: .cfi_offset s5, -48 +; RV64I-NEXT: .cfi_offset s6, -56 +; RV64I-NEXT: .cfi_offset s7, -64 +; RV64I-NEXT: .cfi_offset s8, -72 +; RV64I-NEXT: .cfi_offset s9, -80 +; RV64I-NEXT: .cfi_offset s10, -88 +; RV64I-NEXT: .cfi_offset s11, -96 ; RV64I-NEXT: lui a0, %hi(var) ; RV64I-NEXT: addi a1, a0, %lo(var) ; ; RV64I-WITH-FP-LABEL: callee: ; RV64I-WITH-FP: # %bb.0: ; RV64I-WITH-FP-NEXT: addi sp, sp, -160 +; RV64I-WITH-FP-NEXT: .cfi_def_cfa_offset 160 ; RV64I-WITH-FP-NEXT: sd ra, 152(sp) ; RV64I-WITH-FP-NEXT: sd s0, 144(sp) ; RV64I-WITH-FP-NEXT: sd s1, 136(sp) @@ -97,7 +139,21 @@ ; RV64I-WITH-FP-NEXT: sd s9, 72(sp) ; RV64I-WITH-FP-NEXT: sd s10, 64(sp) ; RV64I-WITH-FP-NEXT: sd s11, 56(sp) +; RV64I-WITH-FP-NEXT: .cfi_offset ra, -8 +; RV64I-WITH-FP-NEXT: .cfi_offset s0, -16 +; RV64I-WITH-FP-NEXT: .cfi_offset s1, -24 +; RV64I-WITH-FP-NEXT: .cfi_offset s2, -32 +; RV64I-WITH-FP-NEXT: .cfi_offset s3, -40 +; RV64I-WITH-FP-NEXT: .cfi_offset s4, -48 +; RV64I-WITH-FP-NEXT: .cfi_offset s5, -56 +; RV64I-WITH-FP-NEXT: .cfi_offset s6, -64 +; RV64I-WITH-FP-NEXT: .cfi_offset s7, -72 +; RV64I-WITH-FP-NEXT: .cfi_offset s8, -80 +; RV64I-WITH-FP-NEXT: .cfi_offset s9, -88 +; RV64I-WITH-FP-NEXT: .cfi_offset s10, -96 +; RV64I-WITH-FP-NEXT: .cfi_offset s11, -104 ; RV64I-WITH-FP-NEXT: addi s0, sp, 160 +; RV64I-WITH-FP-NEXT: .cfi_def_cfa s0, 0 ; RV64I-WITH-FP-NEXT: lui a0, %hi(var) ; RV64I-WITH-FP-NEXT: addi a1, a0, %lo(var) %val = load [32 x i32], [32 x i32]* @var @@ -140,6 +196,7 @@ ; ; RV32I-WITH-FP-LABEL: caller: ; RV32I-WITH-FP: addi s0, sp, 144 +; RV32I-WITH-FP-NEXT: .cfi_def_cfa s0, 0 ; RV32I-WITH-FP-NEXT: lui a0, %hi(var) ; RV32I-WITH-FP-NEXT: addi s1, a0, %lo(var) ; RV32I-WITH-FP: sw a0, -140(s0) @@ -197,6 +254,7 @@ ; ; RV64I-WITH-FP-LABEL: caller: ; RV64I-WITH-FP: addi s0, sp, 288 +; RV64I-WITH-FP-NEXT: .cfi_def_cfa s0, 0 ; RV64I-WITH-FP-NEXT: lui a0, %hi(var) ; RV64I-WITH-FP-NEXT: addi s1, a0, %lo(var) ; RV64I-WITH-FP: sd a0, -280(s0) Index: test/CodeGen/RISCV/frame-info.ll =================================================================== --- /dev/null +++ test/CodeGen/RISCV/frame-info.ll @@ -0,0 +1,17 @@ +; RUN: llc -mtriple=riscv32 < %s | FileCheck %s + +define void @foo(i32 signext %size) { +; CHECK: .cfi_startproc +; CHECK: .cfi_def_cfa_offset 16 +; CHECK: .cfi_offset ra, -4 +; CHECK: .cfi_offset s0, -8 +; CHECK: .cfi_def_cfa s0, 0 +entry: + %0 = alloca i8, i32 %size, align 16 + call void @bar(i8* nonnull %0) #2 + ret void +; CHECK: .cfi_def_cfa sp, 16 +; CHECK: .cfi_def_cfa_offset 0 +} + +declare void @bar(i8*) Index: 
test/CodeGen/RISCV/get-setcc-result-type.ll =================================================================== --- test/CodeGen/RISCV/get-setcc-result-type.ll +++ test/CodeGen/RISCV/get-setcc-result-type.ll @@ -21,6 +21,7 @@ ; RV32I-NEXT: seqz a1, a1 ; RV32I-NEXT: neg a1, a1 ; RV32I-NEXT: sw a1, 0(a0) +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret entry: %0 = load <4 x i32>, <4 x i32>* %p, align 16 Index: test/CodeGen/RISCV/hoist-global-addr-base.ll =================================================================== --- test/CodeGen/RISCV/hoist-global-addr-base.ll +++ test/CodeGen/RISCV/hoist-global-addr-base.ll @@ -16,6 +16,7 @@ ; CHECK-NEXT: sw a1, 164(a0) ; CHECK-NEXT: addi a1, zero, 10 ; CHECK-NEXT: sw a1, 160(a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: store i32 10, i32* getelementptr inbounds (%struct.S, %struct.S* @s, i32 0, i32 1), align 4 @@ -35,6 +36,7 @@ ; CHECK-NEXT: addi a1, zero, 10 ; CHECK-NEXT: sw a1, 160(a0) ; CHECK-NEXT: .LBB1_2: # %if.end +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = load i32, i32* getelementptr inbounds (%struct.S, %struct.S* @s, i32 0, i32 2), align 4 @@ -62,6 +64,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(g+73568) ; CHECK-NEXT: addi a0, a0, %lo(g+73568) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret ret i8* getelementptr inbounds ([1048576 x i8], [1048576 x i8]* @g, i32 0, i32 73568) } @@ -77,6 +80,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(g+524288) ; CHECK-NEXT: addi a0, a0, %lo(g+524288) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret ret i8* getelementptr inbounds ([1048576 x i8], [1048576 x i8]* @g, i32 0, i32 524288) } @@ -86,6 +90,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lui a0, %hi(s+16572) ; CHECK-NEXT: addi a0, a0, %lo(s+16572) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: ret i32* getelementptr inbounds (%struct.S, %struct.S* @s, i32 0, i32 5) @@ -96,6 +101,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lui a0, %hi(s+160) ; CHECK-NEXT: addi a0, a0, %lo(s+160) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: ret i32* getelementptr inbounds (%struct.S, %struct.S* @s, i32 0, i32 1) @@ -111,9 +117,11 @@ ; CHECK-NEXT: beqz a1, .LBB6_2 ; CHECK-NEXT: # %bb.1: # %if.end ; CHECK-NEXT: addi a0, a0, 168 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB6_2: # %if.then ; CHECK-NEXT: addi a0, a0, 160 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = load i32, i32* getelementptr inbounds (%struct.S, %struct.S* @s, i32 0, i32 2), align 4 @@ -162,6 +170,7 @@ ; CHECK-NEXT: lui a0, %hi(s+160) ; CHECK-NEXT: addi a1, zero, 10 ; CHECK-NEXT: sw a1, %lo(s+160)(a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: store i32 10, i32* getelementptr inbounds (%struct.S, %struct.S* @s, i32 0, i32 1), align 4 Index: test/CodeGen/RISCV/inline-asm.ll =================================================================== --- test/CodeGen/RISCV/inline-asm.ll +++ test/CodeGen/RISCV/inline-asm.ll @@ -14,6 +14,7 @@ ; RV32I-NEXT: #APP ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: #NO_APP +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: constraint_r: @@ -23,6 +24,7 @@ ; RV64I-NEXT: #APP ; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: #NO_APP +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = load i32, i32* @gi %2 = tail call i32 asm "add $0, $1, $2", "=r,r,r"(i32 %a, i32 %1) @@ -35,6 +37,7 @@ ; RV32I-NEXT: #APP ; RV32I-NEXT: addi a0, a0, 113 ; RV32I-NEXT: #NO_APP +; RV32I-NEXT: 
.cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: constraint_i: @@ -42,6 +45,7 @@ ; RV64I-NEXT: #APP ; RV64I-NEXT: addi a0, a0, 113 ; RV64I-NEXT: #NO_APP +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = load i32, i32* @gi %2 = tail call i32 asm "addi $0, $1, $2", "=r,r,i"(i32 %a, i32 113) @@ -53,12 +57,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: #APP ; RV32I-NEXT: #NO_APP +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: constraint_m: ; RV64I: # %bb.0: ; RV64I-NEXT: #APP ; RV64I-NEXT: #NO_APP +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret call void asm sideeffect "", "=*m"(i32* %a) ret void @@ -70,6 +76,7 @@ ; RV32I-NEXT: #APP ; RV32I-NEXT: lw a0, 0(a0) ; RV32I-NEXT: #NO_APP +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: constraint_m2: @@ -77,6 +84,7 @@ ; RV64I-NEXT: #APP ; RV64I-NEXT: lw a0, 0(a0) ; RV64I-NEXT: #NO_APP +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = tail call i32 asm "lw $0, $1", "=r,*m"(i32* %a) nounwind ret i32 %1 Index: test/CodeGen/RISCV/interrupt-attr-nocall.ll =================================================================== --- test/CodeGen/RISCV/interrupt-attr-nocall.ll +++ test/CodeGen/RISCV/interrupt-attr-nocall.ll @@ -26,8 +26,11 @@ ; CHECK-RV32-LABEL: foo_i32: ; CHECK-RV32: # %bb.0: ; CHECK-RV32-NEXT: addi sp, sp, -16 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16 ; CHECK-RV32-NEXT: sw a0, 12(sp) ; CHECK-RV32-NEXT: sw a1, 8(sp) +; CHECK-RV32-NEXT: .cfi_offset a0, -4 +; CHECK-RV32-NEXT: .cfi_offset a1, -8 ; CHECK-RV32-NEXT: lui a0, %hi(a) ; CHECK-RV32-NEXT: lw a0, %lo(a)(a0) ; CHECK-RV32-NEXT: lui a1, %hi(b) @@ -38,8 +41,50 @@ ; CHECK-RV32-NEXT: lw a1, 8(sp) ; CHECK-RV32-NEXT: lw a0, 12(sp) ; CHECK-RV32-NEXT: addi sp, sp, 16 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV32-NEXT: mret ; +; CHECK-RV32-F-LABEL: foo_i32: +; CHECK-RV32-F: # %bb.0: +; CHECK-RV32-F-NEXT: addi sp, sp, -16 +; CHECK-RV32-F-NEXT: .cfi_def_cfa_offset 16 +; CHECK-RV32-F-NEXT: sw a0, 12(sp) +; CHECK-RV32-F-NEXT: sw a1, 8(sp) +; CHECK-RV32-F-NEXT: .cfi_offset a0, -4 +; CHECK-RV32-F-NEXT: .cfi_offset a1, -8 +; CHECK-RV32-F-NEXT: lui a0, %hi(a) +; CHECK-RV32-F-NEXT: lw a0, %lo(a)(a0) +; CHECK-RV32-F-NEXT: lui a1, %hi(b) +; CHECK-RV32-F-NEXT: lw a1, %lo(b)(a1) +; CHECK-RV32-F-NEXT: add a0, a1, a0 +; CHECK-RV32-F-NEXT: lui a1, %hi(c) +; CHECK-RV32-F-NEXT: sw a0, %lo(c)(a1) +; CHECK-RV32-F-NEXT: lw a1, 8(sp) +; CHECK-RV32-F-NEXT: lw a0, 12(sp) +; CHECK-RV32-F-NEXT: addi sp, sp, 16 +; CHECK-RV32-F-NEXT: .cfi_def_cfa_offset 0 +; CHECK-RV32-F-NEXT: mret +; +; CHECK-RV32-FD-LABEL: foo_i32: +; CHECK-RV32-FD: # %bb.0: +; CHECK-RV32-FD-NEXT: addi sp, sp, -16 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa_offset 16 +; CHECK-RV32-FD-NEXT: sw a0, 12(sp) +; CHECK-RV32-FD-NEXT: sw a1, 8(sp) +; CHECK-RV32-FD-NEXT: .cfi_offset a0, -4 +; CHECK-RV32-FD-NEXT: .cfi_offset a1, -8 +; CHECK-RV32-FD-NEXT: lui a0, %hi(a) +; CHECK-RV32-FD-NEXT: lw a0, %lo(a)(a0) +; CHECK-RV32-FD-NEXT: lui a1, %hi(b) +; CHECK-RV32-FD-NEXT: lw a1, %lo(b)(a1) +; CHECK-RV32-FD-NEXT: add a0, a1, a0 +; CHECK-RV32-FD-NEXT: lui a1, %hi(c) +; CHECK-RV32-FD-NEXT: sw a0, %lo(c)(a1) +; CHECK-RV32-FD-NEXT: lw a1, 8(sp) +; CHECK-RV32-FD-NEXT: lw a0, 12(sp) +; CHECK-RV32-FD-NEXT: addi sp, sp, 16 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa_offset 0 +; CHECK-RV32-FD-NEXT: mret %1 = load i32, i32* @a %2 = load i32, i32* @b %add = add nsw i32 %2, %1 @@ -55,11 +100,17 @@ ; CHECK-RV32-LABEL: foo_fp_i32: ; CHECK-RV32: # %bb.0: ; CHECK-RV32-NEXT: addi sp, sp, -16 +; CHECK-RV32-NEXT: 
.cfi_def_cfa_offset 16 ; CHECK-RV32-NEXT: sw ra, 12(sp) ; CHECK-RV32-NEXT: sw s0, 8(sp) ; CHECK-RV32-NEXT: sw a0, 4(sp) ; CHECK-RV32-NEXT: sw a1, 0(sp) +; CHECK-RV32-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-NEXT: .cfi_offset s0, -8 +; CHECK-RV32-NEXT: .cfi_offset a0, -12 +; CHECK-RV32-NEXT: .cfi_offset a1, -16 ; CHECK-RV32-NEXT: addi s0, sp, 16 +; CHECK-RV32-NEXT: .cfi_def_cfa s0, 0 ; CHECK-RV32-NEXT: lui a0, %hi(a) ; CHECK-RV32-NEXT: lw a0, %lo(a)(a0) ; CHECK-RV32-NEXT: lui a1, %hi(b) @@ -67,13 +118,74 @@ ; CHECK-RV32-NEXT: add a0, a1, a0 ; CHECK-RV32-NEXT: lui a1, %hi(c) ; CHECK-RV32-NEXT: sw a0, %lo(c)(a1) +; CHECK-RV32-NEXT: .cfi_def_cfa sp, 16 ; CHECK-RV32-NEXT: lw a1, 0(sp) ; CHECK-RV32-NEXT: lw a0, 4(sp) ; CHECK-RV32-NEXT: lw s0, 8(sp) ; CHECK-RV32-NEXT: lw ra, 12(sp) ; CHECK-RV32-NEXT: addi sp, sp, 16 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV32-NEXT: mret ; +; CHECK-RV32-F-LABEL: foo_fp_i32: +; CHECK-RV32-F: # %bb.0: +; CHECK-RV32-F-NEXT: addi sp, sp, -16 +; CHECK-RV32-F-NEXT: .cfi_def_cfa_offset 16 +; CHECK-RV32-F-NEXT: sw ra, 12(sp) +; CHECK-RV32-F-NEXT: sw s0, 8(sp) +; CHECK-RV32-F-NEXT: sw a0, 4(sp) +; CHECK-RV32-F-NEXT: sw a1, 0(sp) +; CHECK-RV32-F-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-F-NEXT: .cfi_offset s0, -8 +; CHECK-RV32-F-NEXT: .cfi_offset a0, -12 +; CHECK-RV32-F-NEXT: .cfi_offset a1, -16 +; CHECK-RV32-F-NEXT: addi s0, sp, 16 +; CHECK-RV32-F-NEXT: .cfi_def_cfa s0, 0 +; CHECK-RV32-F-NEXT: lui a0, %hi(a) +; CHECK-RV32-F-NEXT: lw a0, %lo(a)(a0) +; CHECK-RV32-F-NEXT: lui a1, %hi(b) +; CHECK-RV32-F-NEXT: lw a1, %lo(b)(a1) +; CHECK-RV32-F-NEXT: add a0, a1, a0 +; CHECK-RV32-F-NEXT: lui a1, %hi(c) +; CHECK-RV32-F-NEXT: sw a0, %lo(c)(a1) +; CHECK-RV32-F-NEXT: .cfi_def_cfa sp, 16 +; CHECK-RV32-F-NEXT: lw a1, 0(sp) +; CHECK-RV32-F-NEXT: lw a0, 4(sp) +; CHECK-RV32-F-NEXT: lw s0, 8(sp) +; CHECK-RV32-F-NEXT: lw ra, 12(sp) +; CHECK-RV32-F-NEXT: addi sp, sp, 16 +; CHECK-RV32-F-NEXT: .cfi_def_cfa_offset 0 +; CHECK-RV32-F-NEXT: mret +; +; CHECK-RV32-FD-LABEL: foo_fp_i32: +; CHECK-RV32-FD: # %bb.0: +; CHECK-RV32-FD-NEXT: addi sp, sp, -16 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa_offset 16 +; CHECK-RV32-FD-NEXT: sw ra, 12(sp) +; CHECK-RV32-FD-NEXT: sw s0, 8(sp) +; CHECK-RV32-FD-NEXT: sw a0, 4(sp) +; CHECK-RV32-FD-NEXT: sw a1, 0(sp) +; CHECK-RV32-FD-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-FD-NEXT: .cfi_offset s0, -8 +; CHECK-RV32-FD-NEXT: .cfi_offset a0, -12 +; CHECK-RV32-FD-NEXT: .cfi_offset a1, -16 +; CHECK-RV32-FD-NEXT: addi s0, sp, 16 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa s0, 0 +; CHECK-RV32-FD-NEXT: lui a0, %hi(a) +; CHECK-RV32-FD-NEXT: lw a0, %lo(a)(a0) +; CHECK-RV32-FD-NEXT: lui a1, %hi(b) +; CHECK-RV32-FD-NEXT: lw a1, %lo(b)(a1) +; CHECK-RV32-FD-NEXT: add a0, a1, a0 +; CHECK-RV32-FD-NEXT: lui a1, %hi(c) +; CHECK-RV32-FD-NEXT: sw a0, %lo(c)(a1) +; CHECK-RV32-FD-NEXT: .cfi_def_cfa sp, 16 +; CHECK-RV32-FD-NEXT: lw a1, 0(sp) +; CHECK-RV32-FD-NEXT: lw a0, 4(sp) +; CHECK-RV32-FD-NEXT: lw s0, 8(sp) +; CHECK-RV32-FD-NEXT: lw ra, 12(sp) +; CHECK-RV32-FD-NEXT: addi sp, sp, 16 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa_offset 0 +; CHECK-RV32-FD-NEXT: mret %1 = load i32, i32* @a %2 = load i32, i32* @b %add = add nsw i32 %2, %1 @@ -86,12 +198,79 @@ @d = external global float define void @foo_float() #0 { +; CHECK-RV32-LABEL: foo_float: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: addi sp, sp, -64 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 64 +; CHECK-RV32-NEXT: sw ra, 60(sp) +; CHECK-RV32-NEXT: sw t0, 56(sp) +; CHECK-RV32-NEXT: sw t1, 52(sp) +; CHECK-RV32-NEXT: sw t2, 48(sp) +; CHECK-RV32-NEXT: sw a0, 
44(sp) +; CHECK-RV32-NEXT: sw a1, 40(sp) +; CHECK-RV32-NEXT: sw a2, 36(sp) +; CHECK-RV32-NEXT: sw a3, 32(sp) +; CHECK-RV32-NEXT: sw a4, 28(sp) +; CHECK-RV32-NEXT: sw a5, 24(sp) +; CHECK-RV32-NEXT: sw a6, 20(sp) +; CHECK-RV32-NEXT: sw a7, 16(sp) +; CHECK-RV32-NEXT: sw t3, 12(sp) +; CHECK-RV32-NEXT: sw t4, 8(sp) +; CHECK-RV32-NEXT: sw t5, 4(sp) +; CHECK-RV32-NEXT: sw t6, 0(sp) +; CHECK-RV32-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-NEXT: .cfi_offset t0, -8 +; CHECK-RV32-NEXT: .cfi_offset t1, -12 +; CHECK-RV32-NEXT: .cfi_offset t2, -16 +; CHECK-RV32-NEXT: .cfi_offset a0, -20 +; CHECK-RV32-NEXT: .cfi_offset a1, -24 +; CHECK-RV32-NEXT: .cfi_offset a2, -28 +; CHECK-RV32-NEXT: .cfi_offset a3, -32 +; CHECK-RV32-NEXT: .cfi_offset a4, -36 +; CHECK-RV32-NEXT: .cfi_offset a5, -40 +; CHECK-RV32-NEXT: .cfi_offset a6, -44 +; CHECK-RV32-NEXT: .cfi_offset a7, -48 +; CHECK-RV32-NEXT: .cfi_offset t3, -52 +; CHECK-RV32-NEXT: .cfi_offset t4, -56 +; CHECK-RV32-NEXT: .cfi_offset t5, -60 +; CHECK-RV32-NEXT: .cfi_offset t6, -64 +; CHECK-RV32-NEXT: lui a0, %hi(e) +; CHECK-RV32-NEXT: lw a0, %lo(e)(a0) +; CHECK-RV32-NEXT: lui a1, %hi(f) +; CHECK-RV32-NEXT: lw a1, %lo(f)(a1) +; CHECK-RV32-NEXT: call __addsf3 +; CHECK-RV32-NEXT: lui a1, %hi(d) +; CHECK-RV32-NEXT: sw a0, %lo(d)(a1) +; CHECK-RV32-NEXT: lw t6, 0(sp) +; CHECK-RV32-NEXT: lw t5, 4(sp) +; CHECK-RV32-NEXT: lw t4, 8(sp) +; CHECK-RV32-NEXT: lw t3, 12(sp) +; CHECK-RV32-NEXT: lw a7, 16(sp) +; CHECK-RV32-NEXT: lw a6, 20(sp) +; CHECK-RV32-NEXT: lw a5, 24(sp) +; CHECK-RV32-NEXT: lw a4, 28(sp) +; CHECK-RV32-NEXT: lw a3, 32(sp) +; CHECK-RV32-NEXT: lw a2, 36(sp) +; CHECK-RV32-NEXT: lw a1, 40(sp) +; CHECK-RV32-NEXT: lw a0, 44(sp) +; CHECK-RV32-NEXT: lw t2, 48(sp) +; CHECK-RV32-NEXT: lw t1, 52(sp) +; CHECK-RV32-NEXT: lw t0, 56(sp) +; CHECK-RV32-NEXT: lw ra, 60(sp) +; CHECK-RV32-NEXT: addi sp, sp, 64 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0 +; CHECK-RV32-NEXT: mret +; ; CHECK-RV32-F-LABEL: foo_float: ; CHECK-RV32-F: # %bb.0: ; CHECK-RV32-F-NEXT: addi sp, sp, -16 +; CHECK-RV32-F-NEXT: .cfi_def_cfa_offset 16 ; CHECK-RV32-F-NEXT: sw a0, 12(sp) ; CHECK-RV32-F-NEXT: fsw ft0, 8(sp) ; CHECK-RV32-F-NEXT: fsw ft1, 4(sp) +; CHECK-RV32-F-NEXT: .cfi_offset a0, -4 +; CHECK-RV32-F-NEXT: .cfi_offset ft0, -8 +; CHECK-RV32-F-NEXT: .cfi_offset ft1, -12 ; CHECK-RV32-F-NEXT: lui a0, %hi(f) ; CHECK-RV32-F-NEXT: flw ft0, %lo(f)(a0) ; CHECK-RV32-F-NEXT: lui a0, %hi(e) @@ -103,8 +282,32 @@ ; CHECK-RV32-F-NEXT: flw ft0, 8(sp) ; CHECK-RV32-F-NEXT: lw a0, 12(sp) ; CHECK-RV32-F-NEXT: addi sp, sp, 16 +; CHECK-RV32-F-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV32-F-NEXT: mret ; +; CHECK-RV32-FD-LABEL: foo_float: +; CHECK-RV32-FD: # %bb.0: +; CHECK-RV32-FD-NEXT: addi sp, sp, -32 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa_offset 32 +; CHECK-RV32-FD-NEXT: sw a0, 28(sp) +; CHECK-RV32-FD-NEXT: fsd ft0, 16(sp) +; CHECK-RV32-FD-NEXT: fsd ft1, 8(sp) +; CHECK-RV32-FD-NEXT: .cfi_offset a0, -4 +; CHECK-RV32-FD-NEXT: .cfi_offset ft0, -16 +; CHECK-RV32-FD-NEXT: .cfi_offset ft1, -24 +; CHECK-RV32-FD-NEXT: lui a0, %hi(f) +; CHECK-RV32-FD-NEXT: flw ft0, %lo(f)(a0) +; CHECK-RV32-FD-NEXT: lui a0, %hi(e) +; CHECK-RV32-FD-NEXT: flw ft1, %lo(e)(a0) +; CHECK-RV32-FD-NEXT: fadd.s ft0, ft1, ft0 +; CHECK-RV32-FD-NEXT: lui a0, %hi(d) +; CHECK-RV32-FD-NEXT: fsw ft0, %lo(d)(a0) +; CHECK-RV32-FD-NEXT: fld ft1, 8(sp) +; CHECK-RV32-FD-NEXT: fld ft0, 16(sp) +; CHECK-RV32-FD-NEXT: lw a0, 28(sp) +; CHECK-RV32-FD-NEXT: addi sp, sp, 32 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa_offset 0 +; CHECK-RV32-FD-NEXT: mret %1 = load float, float* @e %2 = load float, 
float* @f %add = fadd float %1, %2 @@ -116,15 +319,91 @@ ; Additionally check frame pointer and return address are properly saved. ; define void @foo_fp_float() #1 { +; CHECK-RV32-LABEL: foo_fp_float: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: addi sp, sp, -80 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 80 +; CHECK-RV32-NEXT: sw ra, 76(sp) +; CHECK-RV32-NEXT: sw t0, 72(sp) +; CHECK-RV32-NEXT: sw t1, 68(sp) +; CHECK-RV32-NEXT: sw t2, 64(sp) +; CHECK-RV32-NEXT: sw s0, 60(sp) +; CHECK-RV32-NEXT: sw a0, 56(sp) +; CHECK-RV32-NEXT: sw a1, 52(sp) +; CHECK-RV32-NEXT: sw a2, 48(sp) +; CHECK-RV32-NEXT: sw a3, 44(sp) +; CHECK-RV32-NEXT: sw a4, 40(sp) +; CHECK-RV32-NEXT: sw a5, 36(sp) +; CHECK-RV32-NEXT: sw a6, 32(sp) +; CHECK-RV32-NEXT: sw a7, 28(sp) +; CHECK-RV32-NEXT: sw t3, 24(sp) +; CHECK-RV32-NEXT: sw t4, 20(sp) +; CHECK-RV32-NEXT: sw t5, 16(sp) +; CHECK-RV32-NEXT: sw t6, 12(sp) +; CHECK-RV32-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-NEXT: .cfi_offset t0, -8 +; CHECK-RV32-NEXT: .cfi_offset t1, -12 +; CHECK-RV32-NEXT: .cfi_offset t2, -16 +; CHECK-RV32-NEXT: .cfi_offset s0, -20 +; CHECK-RV32-NEXT: .cfi_offset a0, -24 +; CHECK-RV32-NEXT: .cfi_offset a1, -28 +; CHECK-RV32-NEXT: .cfi_offset a2, -32 +; CHECK-RV32-NEXT: .cfi_offset a3, -36 +; CHECK-RV32-NEXT: .cfi_offset a4, -40 +; CHECK-RV32-NEXT: .cfi_offset a5, -44 +; CHECK-RV32-NEXT: .cfi_offset a6, -48 +; CHECK-RV32-NEXT: .cfi_offset a7, -52 +; CHECK-RV32-NEXT: .cfi_offset t3, -56 +; CHECK-RV32-NEXT: .cfi_offset t4, -60 +; CHECK-RV32-NEXT: .cfi_offset t5, -64 +; CHECK-RV32-NEXT: .cfi_offset t6, -68 +; CHECK-RV32-NEXT: addi s0, sp, 80 +; CHECK-RV32-NEXT: .cfi_def_cfa s0, 0 +; CHECK-RV32-NEXT: lui a0, %hi(e) +; CHECK-RV32-NEXT: lw a0, %lo(e)(a0) +; CHECK-RV32-NEXT: lui a1, %hi(f) +; CHECK-RV32-NEXT: lw a1, %lo(f)(a1) +; CHECK-RV32-NEXT: call __addsf3 +; CHECK-RV32-NEXT: lui a1, %hi(d) +; CHECK-RV32-NEXT: sw a0, %lo(d)(a1) +; CHECK-RV32-NEXT: .cfi_def_cfa sp, 80 +; CHECK-RV32-NEXT: lw t6, 12(sp) +; CHECK-RV32-NEXT: lw t5, 16(sp) +; CHECK-RV32-NEXT: lw t4, 20(sp) +; CHECK-RV32-NEXT: lw t3, 24(sp) +; CHECK-RV32-NEXT: lw a7, 28(sp) +; CHECK-RV32-NEXT: lw a6, 32(sp) +; CHECK-RV32-NEXT: lw a5, 36(sp) +; CHECK-RV32-NEXT: lw a4, 40(sp) +; CHECK-RV32-NEXT: lw a3, 44(sp) +; CHECK-RV32-NEXT: lw a2, 48(sp) +; CHECK-RV32-NEXT: lw a1, 52(sp) +; CHECK-RV32-NEXT: lw a0, 56(sp) +; CHECK-RV32-NEXT: lw s0, 60(sp) +; CHECK-RV32-NEXT: lw t2, 64(sp) +; CHECK-RV32-NEXT: lw t1, 68(sp) +; CHECK-RV32-NEXT: lw t0, 72(sp) +; CHECK-RV32-NEXT: lw ra, 76(sp) +; CHECK-RV32-NEXT: addi sp, sp, 80 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0 +; CHECK-RV32-NEXT: mret +; ; CHECK-RV32-F-LABEL: foo_fp_float: ; CHECK-RV32-F: # %bb.0: ; CHECK-RV32-F-NEXT: addi sp, sp, -32 +; CHECK-RV32-F-NEXT: .cfi_def_cfa_offset 32 ; CHECK-RV32-F-NEXT: sw ra, 28(sp) ; CHECK-RV32-F-NEXT: sw s0, 24(sp) ; CHECK-RV32-F-NEXT: sw a0, 20(sp) ; CHECK-RV32-F-NEXT: fsw ft0, 16(sp) ; CHECK-RV32-F-NEXT: fsw ft1, 12(sp) +; CHECK-RV32-F-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-F-NEXT: .cfi_offset s0, -8 +; CHECK-RV32-F-NEXT: .cfi_offset a0, -12 +; CHECK-RV32-F-NEXT: .cfi_offset ft0, -16 +; CHECK-RV32-F-NEXT: .cfi_offset ft1, -20 ; CHECK-RV32-F-NEXT: addi s0, sp, 32 +; CHECK-RV32-F-NEXT: .cfi_def_cfa s0, 0 ; CHECK-RV32-F-NEXT: lui a0, %hi(f) ; CHECK-RV32-F-NEXT: flw ft0, %lo(f)(a0) ; CHECK-RV32-F-NEXT: lui a0, %hi(e) @@ -132,14 +411,48 @@ ; CHECK-RV32-F-NEXT: fadd.s ft0, ft1, ft0 ; CHECK-RV32-F-NEXT: lui a0, %hi(d) ; CHECK-RV32-F-NEXT: fsw ft0, %lo(d)(a0) +; CHECK-RV32-F-NEXT: .cfi_def_cfa sp, 32 ; CHECK-RV32-F-NEXT: flw ft1, 
12(sp) ; CHECK-RV32-F-NEXT: flw ft0, 16(sp) ; CHECK-RV32-F-NEXT: lw a0, 20(sp) ; CHECK-RV32-F-NEXT: lw s0, 24(sp) ; CHECK-RV32-F-NEXT: lw ra, 28(sp) ; CHECK-RV32-F-NEXT: addi sp, sp, 32 +; CHECK-RV32-F-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV32-F-NEXT: mret ; +; CHECK-RV32-FD-LABEL: foo_fp_float: +; CHECK-RV32-FD: # %bb.0: +; CHECK-RV32-FD-NEXT: addi sp, sp, -32 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa_offset 32 +; CHECK-RV32-FD-NEXT: sw ra, 28(sp) +; CHECK-RV32-FD-NEXT: sw s0, 24(sp) +; CHECK-RV32-FD-NEXT: sw a0, 20(sp) +; CHECK-RV32-FD-NEXT: fsd ft0, 8(sp) +; CHECK-RV32-FD-NEXT: fsd ft1, 0(sp) +; CHECK-RV32-FD-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-FD-NEXT: .cfi_offset s0, -8 +; CHECK-RV32-FD-NEXT: .cfi_offset a0, -12 +; CHECK-RV32-FD-NEXT: .cfi_offset ft0, -24 +; CHECK-RV32-FD-NEXT: .cfi_offset ft1, -32 +; CHECK-RV32-FD-NEXT: addi s0, sp, 32 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa s0, 0 +; CHECK-RV32-FD-NEXT: lui a0, %hi(f) +; CHECK-RV32-FD-NEXT: flw ft0, %lo(f)(a0) +; CHECK-RV32-FD-NEXT: lui a0, %hi(e) +; CHECK-RV32-FD-NEXT: flw ft1, %lo(e)(a0) +; CHECK-RV32-FD-NEXT: fadd.s ft0, ft1, ft0 +; CHECK-RV32-FD-NEXT: lui a0, %hi(d) +; CHECK-RV32-FD-NEXT: fsw ft0, %lo(d)(a0) +; CHECK-RV32-FD-NEXT: .cfi_def_cfa sp, 32 +; CHECK-RV32-FD-NEXT: fld ft1, 0(sp) +; CHECK-RV32-FD-NEXT: fld ft0, 8(sp) +; CHECK-RV32-FD-NEXT: lw a0, 20(sp) +; CHECK-RV32-FD-NEXT: lw s0, 24(sp) +; CHECK-RV32-FD-NEXT: lw ra, 28(sp) +; CHECK-RV32-FD-NEXT: addi sp, sp, 32 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa_offset 0 +; CHECK-RV32-FD-NEXT: mret %1 = load float, float* @e %2 = load float, float* @f %add = fadd float %1, %2 @@ -152,12 +465,250 @@ @g = external global double define void @foo_double() #0 { +; CHECK-RV32-LABEL: foo_double: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: addi sp, sp, -64 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 64 +; CHECK-RV32-NEXT: sw ra, 60(sp) +; CHECK-RV32-NEXT: sw t0, 56(sp) +; CHECK-RV32-NEXT: sw t1, 52(sp) +; CHECK-RV32-NEXT: sw t2, 48(sp) +; CHECK-RV32-NEXT: sw a0, 44(sp) +; CHECK-RV32-NEXT: sw a1, 40(sp) +; CHECK-RV32-NEXT: sw a2, 36(sp) +; CHECK-RV32-NEXT: sw a3, 32(sp) +; CHECK-RV32-NEXT: sw a4, 28(sp) +; CHECK-RV32-NEXT: sw a5, 24(sp) +; CHECK-RV32-NEXT: sw a6, 20(sp) +; CHECK-RV32-NEXT: sw a7, 16(sp) +; CHECK-RV32-NEXT: sw t3, 12(sp) +; CHECK-RV32-NEXT: sw t4, 8(sp) +; CHECK-RV32-NEXT: sw t5, 4(sp) +; CHECK-RV32-NEXT: sw t6, 0(sp) +; CHECK-RV32-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-NEXT: .cfi_offset t0, -8 +; CHECK-RV32-NEXT: .cfi_offset t1, -12 +; CHECK-RV32-NEXT: .cfi_offset t2, -16 +; CHECK-RV32-NEXT: .cfi_offset a0, -20 +; CHECK-RV32-NEXT: .cfi_offset a1, -24 +; CHECK-RV32-NEXT: .cfi_offset a2, -28 +; CHECK-RV32-NEXT: .cfi_offset a3, -32 +; CHECK-RV32-NEXT: .cfi_offset a4, -36 +; CHECK-RV32-NEXT: .cfi_offset a5, -40 +; CHECK-RV32-NEXT: .cfi_offset a6, -44 +; CHECK-RV32-NEXT: .cfi_offset a7, -48 +; CHECK-RV32-NEXT: .cfi_offset t3, -52 +; CHECK-RV32-NEXT: .cfi_offset t4, -56 +; CHECK-RV32-NEXT: .cfi_offset t5, -60 +; CHECK-RV32-NEXT: .cfi_offset t6, -64 +; CHECK-RV32-NEXT: lui a1, %hi(h) +; CHECK-RV32-NEXT: lw a0, %lo(h)(a1) +; CHECK-RV32-NEXT: addi a1, a1, %lo(h) +; CHECK-RV32-NEXT: lw a1, 4(a1) +; CHECK-RV32-NEXT: lui a3, %hi(i) +; CHECK-RV32-NEXT: lw a2, %lo(i)(a3) +; CHECK-RV32-NEXT: addi a3, a3, %lo(i) +; CHECK-RV32-NEXT: lw a3, 4(a3) +; CHECK-RV32-NEXT: call __adddf3 +; CHECK-RV32-NEXT: lui a2, %hi(g) +; CHECK-RV32-NEXT: addi a3, a2, %lo(g) +; CHECK-RV32-NEXT: sw a1, 4(a3) +; CHECK-RV32-NEXT: sw a0, %lo(g)(a2) +; CHECK-RV32-NEXT: lw t6, 0(sp) +; CHECK-RV32-NEXT: lw t5, 4(sp) +; 
CHECK-RV32-NEXT: lw t4, 8(sp) +; CHECK-RV32-NEXT: lw t3, 12(sp) +; CHECK-RV32-NEXT: lw a7, 16(sp) +; CHECK-RV32-NEXT: lw a6, 20(sp) +; CHECK-RV32-NEXT: lw a5, 24(sp) +; CHECK-RV32-NEXT: lw a4, 28(sp) +; CHECK-RV32-NEXT: lw a3, 32(sp) +; CHECK-RV32-NEXT: lw a2, 36(sp) +; CHECK-RV32-NEXT: lw a1, 40(sp) +; CHECK-RV32-NEXT: lw a0, 44(sp) +; CHECK-RV32-NEXT: lw t2, 48(sp) +; CHECK-RV32-NEXT: lw t1, 52(sp) +; CHECK-RV32-NEXT: lw t0, 56(sp) +; CHECK-RV32-NEXT: lw ra, 60(sp) +; CHECK-RV32-NEXT: addi sp, sp, 64 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0 +; CHECK-RV32-NEXT: mret +; +; CHECK-RV32-F-LABEL: foo_double: +; CHECK-RV32-F: # %bb.0: +; CHECK-RV32-F-NEXT: addi sp, sp, -192 +; CHECK-RV32-F-NEXT: .cfi_def_cfa_offset 192 +; CHECK-RV32-F-NEXT: sw ra, 188(sp) +; CHECK-RV32-F-NEXT: sw t0, 184(sp) +; CHECK-RV32-F-NEXT: sw t1, 180(sp) +; CHECK-RV32-F-NEXT: sw t2, 176(sp) +; CHECK-RV32-F-NEXT: sw a0, 172(sp) +; CHECK-RV32-F-NEXT: sw a1, 168(sp) +; CHECK-RV32-F-NEXT: sw a2, 164(sp) +; CHECK-RV32-F-NEXT: sw a3, 160(sp) +; CHECK-RV32-F-NEXT: sw a4, 156(sp) +; CHECK-RV32-F-NEXT: sw a5, 152(sp) +; CHECK-RV32-F-NEXT: sw a6, 148(sp) +; CHECK-RV32-F-NEXT: sw a7, 144(sp) +; CHECK-RV32-F-NEXT: sw t3, 140(sp) +; CHECK-RV32-F-NEXT: sw t4, 136(sp) +; CHECK-RV32-F-NEXT: sw t5, 132(sp) +; CHECK-RV32-F-NEXT: sw t6, 128(sp) +; CHECK-RV32-F-NEXT: fsw ft0, 124(sp) +; CHECK-RV32-F-NEXT: fsw ft1, 120(sp) +; CHECK-RV32-F-NEXT: fsw ft2, 116(sp) +; CHECK-RV32-F-NEXT: fsw ft3, 112(sp) +; CHECK-RV32-F-NEXT: fsw ft4, 108(sp) +; CHECK-RV32-F-NEXT: fsw ft5, 104(sp) +; CHECK-RV32-F-NEXT: fsw ft6, 100(sp) +; CHECK-RV32-F-NEXT: fsw ft7, 96(sp) +; CHECK-RV32-F-NEXT: fsw fa0, 92(sp) +; CHECK-RV32-F-NEXT: fsw fa1, 88(sp) +; CHECK-RV32-F-NEXT: fsw fa2, 84(sp) +; CHECK-RV32-F-NEXT: fsw fa3, 80(sp) +; CHECK-RV32-F-NEXT: fsw fa4, 76(sp) +; CHECK-RV32-F-NEXT: fsw fa5, 72(sp) +; CHECK-RV32-F-NEXT: fsw fa6, 68(sp) +; CHECK-RV32-F-NEXT: fsw fa7, 64(sp) +; CHECK-RV32-F-NEXT: fsw ft8, 60(sp) +; CHECK-RV32-F-NEXT: fsw ft9, 56(sp) +; CHECK-RV32-F-NEXT: fsw ft10, 52(sp) +; CHECK-RV32-F-NEXT: fsw ft11, 48(sp) +; CHECK-RV32-F-NEXT: fsw fs0, 44(sp) +; CHECK-RV32-F-NEXT: fsw fs1, 40(sp) +; CHECK-RV32-F-NEXT: fsw fs2, 36(sp) +; CHECK-RV32-F-NEXT: fsw fs3, 32(sp) +; CHECK-RV32-F-NEXT: fsw fs4, 28(sp) +; CHECK-RV32-F-NEXT: fsw fs5, 24(sp) +; CHECK-RV32-F-NEXT: fsw fs6, 20(sp) +; CHECK-RV32-F-NEXT: fsw fs7, 16(sp) +; CHECK-RV32-F-NEXT: fsw fs8, 12(sp) +; CHECK-RV32-F-NEXT: fsw fs9, 8(sp) +; CHECK-RV32-F-NEXT: fsw fs10, 4(sp) +; CHECK-RV32-F-NEXT: fsw fs11, 0(sp) +; CHECK-RV32-F-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-F-NEXT: .cfi_offset t0, -8 +; CHECK-RV32-F-NEXT: .cfi_offset t1, -12 +; CHECK-RV32-F-NEXT: .cfi_offset t2, -16 +; CHECK-RV32-F-NEXT: .cfi_offset a0, -20 +; CHECK-RV32-F-NEXT: .cfi_offset a1, -24 +; CHECK-RV32-F-NEXT: .cfi_offset a2, -28 +; CHECK-RV32-F-NEXT: .cfi_offset a3, -32 +; CHECK-RV32-F-NEXT: .cfi_offset a4, -36 +; CHECK-RV32-F-NEXT: .cfi_offset a5, -40 +; CHECK-RV32-F-NEXT: .cfi_offset a6, -44 +; CHECK-RV32-F-NEXT: .cfi_offset a7, -48 +; CHECK-RV32-F-NEXT: .cfi_offset t3, -52 +; CHECK-RV32-F-NEXT: .cfi_offset t4, -56 +; CHECK-RV32-F-NEXT: .cfi_offset t5, -60 +; CHECK-RV32-F-NEXT: .cfi_offset t6, -64 +; CHECK-RV32-F-NEXT: .cfi_offset ft0, -68 +; CHECK-RV32-F-NEXT: .cfi_offset ft1, -72 +; CHECK-RV32-F-NEXT: .cfi_offset ft2, -76 +; CHECK-RV32-F-NEXT: .cfi_offset ft3, -80 +; CHECK-RV32-F-NEXT: .cfi_offset ft4, -84 +; CHECK-RV32-F-NEXT: .cfi_offset ft5, -88 +; CHECK-RV32-F-NEXT: .cfi_offset ft6, -92 +; CHECK-RV32-F-NEXT: .cfi_offset ft7, -96 
+; CHECK-RV32-F-NEXT: .cfi_offset fa0, -100 +; CHECK-RV32-F-NEXT: .cfi_offset fa1, -104 +; CHECK-RV32-F-NEXT: .cfi_offset fa2, -108 +; CHECK-RV32-F-NEXT: .cfi_offset fa3, -112 +; CHECK-RV32-F-NEXT: .cfi_offset fa4, -116 +; CHECK-RV32-F-NEXT: .cfi_offset fa5, -120 +; CHECK-RV32-F-NEXT: .cfi_offset fa6, -124 +; CHECK-RV32-F-NEXT: .cfi_offset fa7, -128 +; CHECK-RV32-F-NEXT: .cfi_offset ft8, -132 +; CHECK-RV32-F-NEXT: .cfi_offset ft9, -136 +; CHECK-RV32-F-NEXT: .cfi_offset ft10, -140 +; CHECK-RV32-F-NEXT: .cfi_offset ft11, -144 +; CHECK-RV32-F-NEXT: .cfi_offset fs0, -148 +; CHECK-RV32-F-NEXT: .cfi_offset fs1, -152 +; CHECK-RV32-F-NEXT: .cfi_offset fs2, -156 +; CHECK-RV32-F-NEXT: .cfi_offset fs3, -160 +; CHECK-RV32-F-NEXT: .cfi_offset fs4, -164 +; CHECK-RV32-F-NEXT: .cfi_offset fs5, -168 +; CHECK-RV32-F-NEXT: .cfi_offset fs6, -172 +; CHECK-RV32-F-NEXT: .cfi_offset fs7, -176 +; CHECK-RV32-F-NEXT: .cfi_offset fs8, -180 +; CHECK-RV32-F-NEXT: .cfi_offset fs9, -184 +; CHECK-RV32-F-NEXT: .cfi_offset fs10, -188 +; CHECK-RV32-F-NEXT: .cfi_offset fs11, -192 +; CHECK-RV32-F-NEXT: lui a1, %hi(h) +; CHECK-RV32-F-NEXT: lw a0, %lo(h)(a1) +; CHECK-RV32-F-NEXT: addi a1, a1, %lo(h) +; CHECK-RV32-F-NEXT: lw a1, 4(a1) +; CHECK-RV32-F-NEXT: lui a3, %hi(i) +; CHECK-RV32-F-NEXT: lw a2, %lo(i)(a3) +; CHECK-RV32-F-NEXT: addi a3, a3, %lo(i) +; CHECK-RV32-F-NEXT: lw a3, 4(a3) +; CHECK-RV32-F-NEXT: call __adddf3 +; CHECK-RV32-F-NEXT: lui a2, %hi(g) +; CHECK-RV32-F-NEXT: addi a3, a2, %lo(g) +; CHECK-RV32-F-NEXT: sw a1, 4(a3) +; CHECK-RV32-F-NEXT: sw a0, %lo(g)(a2) +; CHECK-RV32-F-NEXT: flw fs11, 0(sp) +; CHECK-RV32-F-NEXT: flw fs10, 4(sp) +; CHECK-RV32-F-NEXT: flw fs9, 8(sp) +; CHECK-RV32-F-NEXT: flw fs8, 12(sp) +; CHECK-RV32-F-NEXT: flw fs7, 16(sp) +; CHECK-RV32-F-NEXT: flw fs6, 20(sp) +; CHECK-RV32-F-NEXT: flw fs5, 24(sp) +; CHECK-RV32-F-NEXT: flw fs4, 28(sp) +; CHECK-RV32-F-NEXT: flw fs3, 32(sp) +; CHECK-RV32-F-NEXT: flw fs2, 36(sp) +; CHECK-RV32-F-NEXT: flw fs1, 40(sp) +; CHECK-RV32-F-NEXT: flw fs0, 44(sp) +; CHECK-RV32-F-NEXT: flw ft11, 48(sp) +; CHECK-RV32-F-NEXT: flw ft10, 52(sp) +; CHECK-RV32-F-NEXT: flw ft9, 56(sp) +; CHECK-RV32-F-NEXT: flw ft8, 60(sp) +; CHECK-RV32-F-NEXT: flw fa7, 64(sp) +; CHECK-RV32-F-NEXT: flw fa6, 68(sp) +; CHECK-RV32-F-NEXT: flw fa5, 72(sp) +; CHECK-RV32-F-NEXT: flw fa4, 76(sp) +; CHECK-RV32-F-NEXT: flw fa3, 80(sp) +; CHECK-RV32-F-NEXT: flw fa2, 84(sp) +; CHECK-RV32-F-NEXT: flw fa1, 88(sp) +; CHECK-RV32-F-NEXT: flw fa0, 92(sp) +; CHECK-RV32-F-NEXT: flw ft7, 96(sp) +; CHECK-RV32-F-NEXT: flw ft6, 100(sp) +; CHECK-RV32-F-NEXT: flw ft5, 104(sp) +; CHECK-RV32-F-NEXT: flw ft4, 108(sp) +; CHECK-RV32-F-NEXT: flw ft3, 112(sp) +; CHECK-RV32-F-NEXT: flw ft2, 116(sp) +; CHECK-RV32-F-NEXT: flw ft1, 120(sp) +; CHECK-RV32-F-NEXT: flw ft0, 124(sp) +; CHECK-RV32-F-NEXT: lw t6, 128(sp) +; CHECK-RV32-F-NEXT: lw t5, 132(sp) +; CHECK-RV32-F-NEXT: lw t4, 136(sp) +; CHECK-RV32-F-NEXT: lw t3, 140(sp) +; CHECK-RV32-F-NEXT: lw a7, 144(sp) +; CHECK-RV32-F-NEXT: lw a6, 148(sp) +; CHECK-RV32-F-NEXT: lw a5, 152(sp) +; CHECK-RV32-F-NEXT: lw a4, 156(sp) +; CHECK-RV32-F-NEXT: lw a3, 160(sp) +; CHECK-RV32-F-NEXT: lw a2, 164(sp) +; CHECK-RV32-F-NEXT: lw a1, 168(sp) +; CHECK-RV32-F-NEXT: lw a0, 172(sp) +; CHECK-RV32-F-NEXT: lw t2, 176(sp) +; CHECK-RV32-F-NEXT: lw t1, 180(sp) +; CHECK-RV32-F-NEXT: lw t0, 184(sp) +; CHECK-RV32-F-NEXT: lw ra, 188(sp) +; CHECK-RV32-F-NEXT: addi sp, sp, 192 +; CHECK-RV32-F-NEXT: .cfi_def_cfa_offset 0 +; CHECK-RV32-F-NEXT: mret +; ; CHECK-RV32-FD-LABEL: foo_double: ; CHECK-RV32-FD: # %bb.0: ; 
CHECK-RV32-FD-NEXT: addi sp, sp, -32 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa_offset 32 ; CHECK-RV32-FD-NEXT: sw a0, 28(sp) ; CHECK-RV32-FD-NEXT: fsd ft0, 16(sp) ; CHECK-RV32-FD-NEXT: fsd ft1, 8(sp) +; CHECK-RV32-FD-NEXT: .cfi_offset a0, -4 +; CHECK-RV32-FD-NEXT: .cfi_offset ft0, -16 +; CHECK-RV32-FD-NEXT: .cfi_offset ft1, -24 ; CHECK-RV32-FD-NEXT: lui a0, %hi(i) ; CHECK-RV32-FD-NEXT: fld ft0, %lo(i)(a0) ; CHECK-RV32-FD-NEXT: lui a0, %hi(h) @@ -169,8 +720,8 @@ ; CHECK-RV32-FD-NEXT: fld ft0, 16(sp) ; CHECK-RV32-FD-NEXT: lw a0, 28(sp) ; CHECK-RV32-FD-NEXT: addi sp, sp, 32 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV32-FD-NEXT: mret -; %1 = load double, double* @h %2 = load double, double* @i %add = fadd double %1, %2 @@ -182,15 +733,268 @@ ; Additionally check frame pointer and return address are properly saved. ; define void @foo_fp_double() #1 { +; CHECK-RV32-LABEL: foo_fp_double: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: addi sp, sp, -80 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 80 +; CHECK-RV32-NEXT: sw ra, 76(sp) +; CHECK-RV32-NEXT: sw t0, 72(sp) +; CHECK-RV32-NEXT: sw t1, 68(sp) +; CHECK-RV32-NEXT: sw t2, 64(sp) +; CHECK-RV32-NEXT: sw s0, 60(sp) +; CHECK-RV32-NEXT: sw a0, 56(sp) +; CHECK-RV32-NEXT: sw a1, 52(sp) +; CHECK-RV32-NEXT: sw a2, 48(sp) +; CHECK-RV32-NEXT: sw a3, 44(sp) +; CHECK-RV32-NEXT: sw a4, 40(sp) +; CHECK-RV32-NEXT: sw a5, 36(sp) +; CHECK-RV32-NEXT: sw a6, 32(sp) +; CHECK-RV32-NEXT: sw a7, 28(sp) +; CHECK-RV32-NEXT: sw t3, 24(sp) +; CHECK-RV32-NEXT: sw t4, 20(sp) +; CHECK-RV32-NEXT: sw t5, 16(sp) +; CHECK-RV32-NEXT: sw t6, 12(sp) +; CHECK-RV32-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-NEXT: .cfi_offset t0, -8 +; CHECK-RV32-NEXT: .cfi_offset t1, -12 +; CHECK-RV32-NEXT: .cfi_offset t2, -16 +; CHECK-RV32-NEXT: .cfi_offset s0, -20 +; CHECK-RV32-NEXT: .cfi_offset a0, -24 +; CHECK-RV32-NEXT: .cfi_offset a1, -28 +; CHECK-RV32-NEXT: .cfi_offset a2, -32 +; CHECK-RV32-NEXT: .cfi_offset a3, -36 +; CHECK-RV32-NEXT: .cfi_offset a4, -40 +; CHECK-RV32-NEXT: .cfi_offset a5, -44 +; CHECK-RV32-NEXT: .cfi_offset a6, -48 +; CHECK-RV32-NEXT: .cfi_offset a7, -52 +; CHECK-RV32-NEXT: .cfi_offset t3, -56 +; CHECK-RV32-NEXT: .cfi_offset t4, -60 +; CHECK-RV32-NEXT: .cfi_offset t5, -64 +; CHECK-RV32-NEXT: .cfi_offset t6, -68 +; CHECK-RV32-NEXT: addi s0, sp, 80 +; CHECK-RV32-NEXT: .cfi_def_cfa s0, 0 +; CHECK-RV32-NEXT: lui a1, %hi(h) +; CHECK-RV32-NEXT: lw a0, %lo(h)(a1) +; CHECK-RV32-NEXT: addi a1, a1, %lo(h) +; CHECK-RV32-NEXT: lw a1, 4(a1) +; CHECK-RV32-NEXT: lui a3, %hi(i) +; CHECK-RV32-NEXT: lw a2, %lo(i)(a3) +; CHECK-RV32-NEXT: addi a3, a3, %lo(i) +; CHECK-RV32-NEXT: lw a3, 4(a3) +; CHECK-RV32-NEXT: call __adddf3 +; CHECK-RV32-NEXT: lui a2, %hi(g) +; CHECK-RV32-NEXT: addi a3, a2, %lo(g) +; CHECK-RV32-NEXT: sw a1, 4(a3) +; CHECK-RV32-NEXT: sw a0, %lo(g)(a2) +; CHECK-RV32-NEXT: .cfi_def_cfa sp, 80 +; CHECK-RV32-NEXT: lw t6, 12(sp) +; CHECK-RV32-NEXT: lw t5, 16(sp) +; CHECK-RV32-NEXT: lw t4, 20(sp) +; CHECK-RV32-NEXT: lw t3, 24(sp) +; CHECK-RV32-NEXT: lw a7, 28(sp) +; CHECK-RV32-NEXT: lw a6, 32(sp) +; CHECK-RV32-NEXT: lw a5, 36(sp) +; CHECK-RV32-NEXT: lw a4, 40(sp) +; CHECK-RV32-NEXT: lw a3, 44(sp) +; CHECK-RV32-NEXT: lw a2, 48(sp) +; CHECK-RV32-NEXT: lw a1, 52(sp) +; CHECK-RV32-NEXT: lw a0, 56(sp) +; CHECK-RV32-NEXT: lw s0, 60(sp) +; CHECK-RV32-NEXT: lw t2, 64(sp) +; CHECK-RV32-NEXT: lw t1, 68(sp) +; CHECK-RV32-NEXT: lw t0, 72(sp) +; CHECK-RV32-NEXT: lw ra, 76(sp) +; CHECK-RV32-NEXT: addi sp, sp, 80 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0 +; CHECK-RV32-NEXT: mret +; +; CHECK-RV32-F-LABEL: 
foo_fp_double: +; CHECK-RV32-F: # %bb.0: +; CHECK-RV32-F-NEXT: addi sp, sp, -208 +; CHECK-RV32-F-NEXT: .cfi_def_cfa_offset 208 +; CHECK-RV32-F-NEXT: sw ra, 204(sp) +; CHECK-RV32-F-NEXT: sw t0, 200(sp) +; CHECK-RV32-F-NEXT: sw t1, 196(sp) +; CHECK-RV32-F-NEXT: sw t2, 192(sp) +; CHECK-RV32-F-NEXT: sw s0, 188(sp) +; CHECK-RV32-F-NEXT: sw a0, 184(sp) +; CHECK-RV32-F-NEXT: sw a1, 180(sp) +; CHECK-RV32-F-NEXT: sw a2, 176(sp) +; CHECK-RV32-F-NEXT: sw a3, 172(sp) +; CHECK-RV32-F-NEXT: sw a4, 168(sp) +; CHECK-RV32-F-NEXT: sw a5, 164(sp) +; CHECK-RV32-F-NEXT: sw a6, 160(sp) +; CHECK-RV32-F-NEXT: sw a7, 156(sp) +; CHECK-RV32-F-NEXT: sw t3, 152(sp) +; CHECK-RV32-F-NEXT: sw t4, 148(sp) +; CHECK-RV32-F-NEXT: sw t5, 144(sp) +; CHECK-RV32-F-NEXT: sw t6, 140(sp) +; CHECK-RV32-F-NEXT: fsw ft0, 136(sp) +; CHECK-RV32-F-NEXT: fsw ft1, 132(sp) +; CHECK-RV32-F-NEXT: fsw ft2, 128(sp) +; CHECK-RV32-F-NEXT: fsw ft3, 124(sp) +; CHECK-RV32-F-NEXT: fsw ft4, 120(sp) +; CHECK-RV32-F-NEXT: fsw ft5, 116(sp) +; CHECK-RV32-F-NEXT: fsw ft6, 112(sp) +; CHECK-RV32-F-NEXT: fsw ft7, 108(sp) +; CHECK-RV32-F-NEXT: fsw fa0, 104(sp) +; CHECK-RV32-F-NEXT: fsw fa1, 100(sp) +; CHECK-RV32-F-NEXT: fsw fa2, 96(sp) +; CHECK-RV32-F-NEXT: fsw fa3, 92(sp) +; CHECK-RV32-F-NEXT: fsw fa4, 88(sp) +; CHECK-RV32-F-NEXT: fsw fa5, 84(sp) +; CHECK-RV32-F-NEXT: fsw fa6, 80(sp) +; CHECK-RV32-F-NEXT: fsw fa7, 76(sp) +; CHECK-RV32-F-NEXT: fsw ft8, 72(sp) +; CHECK-RV32-F-NEXT: fsw ft9, 68(sp) +; CHECK-RV32-F-NEXT: fsw ft10, 64(sp) +; CHECK-RV32-F-NEXT: fsw ft11, 60(sp) +; CHECK-RV32-F-NEXT: fsw fs0, 56(sp) +; CHECK-RV32-F-NEXT: fsw fs1, 52(sp) +; CHECK-RV32-F-NEXT: fsw fs2, 48(sp) +; CHECK-RV32-F-NEXT: fsw fs3, 44(sp) +; CHECK-RV32-F-NEXT: fsw fs4, 40(sp) +; CHECK-RV32-F-NEXT: fsw fs5, 36(sp) +; CHECK-RV32-F-NEXT: fsw fs6, 32(sp) +; CHECK-RV32-F-NEXT: fsw fs7, 28(sp) +; CHECK-RV32-F-NEXT: fsw fs8, 24(sp) +; CHECK-RV32-F-NEXT: fsw fs9, 20(sp) +; CHECK-RV32-F-NEXT: fsw fs10, 16(sp) +; CHECK-RV32-F-NEXT: fsw fs11, 12(sp) +; CHECK-RV32-F-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-F-NEXT: .cfi_offset t0, -8 +; CHECK-RV32-F-NEXT: .cfi_offset t1, -12 +; CHECK-RV32-F-NEXT: .cfi_offset t2, -16 +; CHECK-RV32-F-NEXT: .cfi_offset s0, -20 +; CHECK-RV32-F-NEXT: .cfi_offset a0, -24 +; CHECK-RV32-F-NEXT: .cfi_offset a1, -28 +; CHECK-RV32-F-NEXT: .cfi_offset a2, -32 +; CHECK-RV32-F-NEXT: .cfi_offset a3, -36 +; CHECK-RV32-F-NEXT: .cfi_offset a4, -40 +; CHECK-RV32-F-NEXT: .cfi_offset a5, -44 +; CHECK-RV32-F-NEXT: .cfi_offset a6, -48 +; CHECK-RV32-F-NEXT: .cfi_offset a7, -52 +; CHECK-RV32-F-NEXT: .cfi_offset t3, -56 +; CHECK-RV32-F-NEXT: .cfi_offset t4, -60 +; CHECK-RV32-F-NEXT: .cfi_offset t5, -64 +; CHECK-RV32-F-NEXT: .cfi_offset t6, -68 +; CHECK-RV32-F-NEXT: .cfi_offset ft0, -72 +; CHECK-RV32-F-NEXT: .cfi_offset ft1, -76 +; CHECK-RV32-F-NEXT: .cfi_offset ft2, -80 +; CHECK-RV32-F-NEXT: .cfi_offset ft3, -84 +; CHECK-RV32-F-NEXT: .cfi_offset ft4, -88 +; CHECK-RV32-F-NEXT: .cfi_offset ft5, -92 +; CHECK-RV32-F-NEXT: .cfi_offset ft6, -96 +; CHECK-RV32-F-NEXT: .cfi_offset ft7, -100 +; CHECK-RV32-F-NEXT: .cfi_offset fa0, -104 +; CHECK-RV32-F-NEXT: .cfi_offset fa1, -108 +; CHECK-RV32-F-NEXT: .cfi_offset fa2, -112 +; CHECK-RV32-F-NEXT: .cfi_offset fa3, -116 +; CHECK-RV32-F-NEXT: .cfi_offset fa4, -120 +; CHECK-RV32-F-NEXT: .cfi_offset fa5, -124 +; CHECK-RV32-F-NEXT: .cfi_offset fa6, -128 +; CHECK-RV32-F-NEXT: .cfi_offset fa7, -132 +; CHECK-RV32-F-NEXT: .cfi_offset ft8, -136 +; CHECK-RV32-F-NEXT: .cfi_offset ft9, -140 +; CHECK-RV32-F-NEXT: .cfi_offset ft10, -144 +; CHECK-RV32-F-NEXT: 
.cfi_offset ft11, -148 +; CHECK-RV32-F-NEXT: .cfi_offset fs0, -152 +; CHECK-RV32-F-NEXT: .cfi_offset fs1, -156 +; CHECK-RV32-F-NEXT: .cfi_offset fs2, -160 +; CHECK-RV32-F-NEXT: .cfi_offset fs3, -164 +; CHECK-RV32-F-NEXT: .cfi_offset fs4, -168 +; CHECK-RV32-F-NEXT: .cfi_offset fs5, -172 +; CHECK-RV32-F-NEXT: .cfi_offset fs6, -176 +; CHECK-RV32-F-NEXT: .cfi_offset fs7, -180 +; CHECK-RV32-F-NEXT: .cfi_offset fs8, -184 +; CHECK-RV32-F-NEXT: .cfi_offset fs9, -188 +; CHECK-RV32-F-NEXT: .cfi_offset fs10, -192 +; CHECK-RV32-F-NEXT: .cfi_offset fs11, -196 +; CHECK-RV32-F-NEXT: addi s0, sp, 208 +; CHECK-RV32-F-NEXT: .cfi_def_cfa s0, 0 +; CHECK-RV32-F-NEXT: lui a1, %hi(h) +; CHECK-RV32-F-NEXT: lw a0, %lo(h)(a1) +; CHECK-RV32-F-NEXT: addi a1, a1, %lo(h) +; CHECK-RV32-F-NEXT: lw a1, 4(a1) +; CHECK-RV32-F-NEXT: lui a3, %hi(i) +; CHECK-RV32-F-NEXT: lw a2, %lo(i)(a3) +; CHECK-RV32-F-NEXT: addi a3, a3, %lo(i) +; CHECK-RV32-F-NEXT: lw a3, 4(a3) +; CHECK-RV32-F-NEXT: call __adddf3 +; CHECK-RV32-F-NEXT: lui a2, %hi(g) +; CHECK-RV32-F-NEXT: addi a3, a2, %lo(g) +; CHECK-RV32-F-NEXT: sw a1, 4(a3) +; CHECK-RV32-F-NEXT: sw a0, %lo(g)(a2) +; CHECK-RV32-F-NEXT: .cfi_def_cfa sp, 208 +; CHECK-RV32-F-NEXT: flw fs11, 12(sp) +; CHECK-RV32-F-NEXT: flw fs10, 16(sp) +; CHECK-RV32-F-NEXT: flw fs9, 20(sp) +; CHECK-RV32-F-NEXT: flw fs8, 24(sp) +; CHECK-RV32-F-NEXT: flw fs7, 28(sp) +; CHECK-RV32-F-NEXT: flw fs6, 32(sp) +; CHECK-RV32-F-NEXT: flw fs5, 36(sp) +; CHECK-RV32-F-NEXT: flw fs4, 40(sp) +; CHECK-RV32-F-NEXT: flw fs3, 44(sp) +; CHECK-RV32-F-NEXT: flw fs2, 48(sp) +; CHECK-RV32-F-NEXT: flw fs1, 52(sp) +; CHECK-RV32-F-NEXT: flw fs0, 56(sp) +; CHECK-RV32-F-NEXT: flw ft11, 60(sp) +; CHECK-RV32-F-NEXT: flw ft10, 64(sp) +; CHECK-RV32-F-NEXT: flw ft9, 68(sp) +; CHECK-RV32-F-NEXT: flw ft8, 72(sp) +; CHECK-RV32-F-NEXT: flw fa7, 76(sp) +; CHECK-RV32-F-NEXT: flw fa6, 80(sp) +; CHECK-RV32-F-NEXT: flw fa5, 84(sp) +; CHECK-RV32-F-NEXT: flw fa4, 88(sp) +; CHECK-RV32-F-NEXT: flw fa3, 92(sp) +; CHECK-RV32-F-NEXT: flw fa2, 96(sp) +; CHECK-RV32-F-NEXT: flw fa1, 100(sp) +; CHECK-RV32-F-NEXT: flw fa0, 104(sp) +; CHECK-RV32-F-NEXT: flw ft7, 108(sp) +; CHECK-RV32-F-NEXT: flw ft6, 112(sp) +; CHECK-RV32-F-NEXT: flw ft5, 116(sp) +; CHECK-RV32-F-NEXT: flw ft4, 120(sp) +; CHECK-RV32-F-NEXT: flw ft3, 124(sp) +; CHECK-RV32-F-NEXT: flw ft2, 128(sp) +; CHECK-RV32-F-NEXT: flw ft1, 132(sp) +; CHECK-RV32-F-NEXT: flw ft0, 136(sp) +; CHECK-RV32-F-NEXT: lw t6, 140(sp) +; CHECK-RV32-F-NEXT: lw t5, 144(sp) +; CHECK-RV32-F-NEXT: lw t4, 148(sp) +; CHECK-RV32-F-NEXT: lw t3, 152(sp) +; CHECK-RV32-F-NEXT: lw a7, 156(sp) +; CHECK-RV32-F-NEXT: lw a6, 160(sp) +; CHECK-RV32-F-NEXT: lw a5, 164(sp) +; CHECK-RV32-F-NEXT: lw a4, 168(sp) +; CHECK-RV32-F-NEXT: lw a3, 172(sp) +; CHECK-RV32-F-NEXT: lw a2, 176(sp) +; CHECK-RV32-F-NEXT: lw a1, 180(sp) +; CHECK-RV32-F-NEXT: lw a0, 184(sp) +; CHECK-RV32-F-NEXT: lw s0, 188(sp) +; CHECK-RV32-F-NEXT: lw t2, 192(sp) +; CHECK-RV32-F-NEXT: lw t1, 196(sp) +; CHECK-RV32-F-NEXT: lw t0, 200(sp) +; CHECK-RV32-F-NEXT: lw ra, 204(sp) +; CHECK-RV32-F-NEXT: addi sp, sp, 208 +; CHECK-RV32-F-NEXT: .cfi_def_cfa_offset 0 +; CHECK-RV32-F-NEXT: mret +; ; CHECK-RV32-FD-LABEL: foo_fp_double: ; CHECK-RV32-FD: # %bb.0: ; CHECK-RV32-FD-NEXT: addi sp, sp, -32 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa_offset 32 ; CHECK-RV32-FD-NEXT: sw ra, 28(sp) ; CHECK-RV32-FD-NEXT: sw s0, 24(sp) ; CHECK-RV32-FD-NEXT: sw a0, 20(sp) ; CHECK-RV32-FD-NEXT: fsd ft0, 8(sp) ; CHECK-RV32-FD-NEXT: fsd ft1, 0(sp) +; CHECK-RV32-FD-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-FD-NEXT: .cfi_offset 
s0, -8 +; CHECK-RV32-FD-NEXT: .cfi_offset a0, -12 +; CHECK-RV32-FD-NEXT: .cfi_offset ft0, -24 +; CHECK-RV32-FD-NEXT: .cfi_offset ft1, -32 ; CHECK-RV32-FD-NEXT: addi s0, sp, 32 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa s0, 0 ; CHECK-RV32-FD-NEXT: lui a0, %hi(i) ; CHECK-RV32-FD-NEXT: fld ft0, %lo(i)(a0) ; CHECK-RV32-FD-NEXT: lui a0, %hi(h) @@ -198,14 +1002,15 @@ ; CHECK-RV32-FD-NEXT: fadd.d ft0, ft1, ft0 ; CHECK-RV32-FD-NEXT: lui a0, %hi(g) ; CHECK-RV32-FD-NEXT: fsd ft0, %lo(g)(a0) +; CHECK-RV32-FD-NEXT: .cfi_def_cfa sp, 32 ; CHECK-RV32-FD-NEXT: fld ft1, 0(sp) ; CHECK-RV32-FD-NEXT: fld ft0, 8(sp) ; CHECK-RV32-FD-NEXT: lw a0, 20(sp) ; CHECK-RV32-FD-NEXT: lw s0, 24(sp) ; CHECK-RV32-FD-NEXT: lw ra, 28(sp) ; CHECK-RV32-FD-NEXT: addi sp, sp, 32 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV32-FD-NEXT: mret -; %1 = load double, double* @h %2 = load double, double* @i %add = fadd double %1, %2 Index: test/CodeGen/RISCV/interrupt-attr.ll =================================================================== --- test/CodeGen/RISCV/interrupt-attr.ll +++ test/CodeGen/RISCV/interrupt-attr.ll @@ -19,6 +19,7 @@ define void @foo_user() #0 { ; CHECK-LABEL: foo_user: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: uret ret void } @@ -26,6 +27,7 @@ define void @foo_supervisor() #1 { ; CHECK-LABEL: foo_supervisor: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: sret ret void } @@ -33,6 +35,7 @@ define void @foo_machine() #2 { ; CHECK-LABEL: foo_machine: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: mret ret void } @@ -54,6 +57,7 @@ ; CHECK-RV32-LABEL: foo_with_call: ; CHECK-RV32: # %bb.0: ; CHECK-RV32-NEXT: addi sp, sp, -64 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 64 ; CHECK-RV32-NEXT: sw ra, 60(sp) ; CHECK-RV32-NEXT: sw t0, 56(sp) ; CHECK-RV32-NEXT: sw t1, 52(sp) @@ -70,6 +74,22 @@ ; CHECK-RV32-NEXT: sw t4, 8(sp) ; CHECK-RV32-NEXT: sw t5, 4(sp) ; CHECK-RV32-NEXT: sw t6, 0(sp) +; CHECK-RV32-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-NEXT: .cfi_offset t0, -8 +; CHECK-RV32-NEXT: .cfi_offset t1, -12 +; CHECK-RV32-NEXT: .cfi_offset t2, -16 +; CHECK-RV32-NEXT: .cfi_offset a0, -20 +; CHECK-RV32-NEXT: .cfi_offset a1, -24 +; CHECK-RV32-NEXT: .cfi_offset a2, -28 +; CHECK-RV32-NEXT: .cfi_offset a3, -32 +; CHECK-RV32-NEXT: .cfi_offset a4, -36 +; CHECK-RV32-NEXT: .cfi_offset a5, -40 +; CHECK-RV32-NEXT: .cfi_offset a6, -44 +; CHECK-RV32-NEXT: .cfi_offset a7, -48 +; CHECK-RV32-NEXT: .cfi_offset t3, -52 +; CHECK-RV32-NEXT: .cfi_offset t4, -56 +; CHECK-RV32-NEXT: .cfi_offset t5, -60 +; CHECK-RV32-NEXT: .cfi_offset t6, -64 ; CHECK-RV32-NEXT: call otherfoo ; CHECK-RV32-NEXT: lw t6, 0(sp) ; CHECK-RV32-NEXT: lw t5, 4(sp) @@ -88,11 +108,13 @@ ; CHECK-RV32-NEXT: lw t0, 56(sp) ; CHECK-RV32-NEXT: lw ra, 60(sp) ; CHECK-RV32-NEXT: addi sp, sp, 64 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV32-NEXT: mret ; ; CHECK-RV32-F-LABEL: foo_with_call: ; CHECK-RV32-F: # %bb.0: ; CHECK-RV32-F-NEXT: addi sp, sp, -192 +; CHECK-RV32-F-NEXT: .cfi_def_cfa_offset 192 ; CHECK-RV32-F-NEXT: sw ra, 188(sp) ; CHECK-RV32-F-NEXT: sw t0, 184(sp) ; CHECK-RV32-F-NEXT: sw t1, 180(sp) @@ -141,6 +163,54 @@ ; CHECK-RV32-F-NEXT: fsw fs9, 8(sp) ; CHECK-RV32-F-NEXT: fsw fs10, 4(sp) ; CHECK-RV32-F-NEXT: fsw fs11, 0(sp) +; CHECK-RV32-F-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-F-NEXT: .cfi_offset t0, -8 +; CHECK-RV32-F-NEXT: .cfi_offset t1, -12 +; CHECK-RV32-F-NEXT: .cfi_offset t2, -16 +; CHECK-RV32-F-NEXT: .cfi_offset a0, -20 +; CHECK-RV32-F-NEXT: .cfi_offset a1, -24 +; CHECK-RV32-F-NEXT: .cfi_offset a2, 
-28 +; CHECK-RV32-F-NEXT: .cfi_offset a3, -32 +; CHECK-RV32-F-NEXT: .cfi_offset a4, -36 +; CHECK-RV32-F-NEXT: .cfi_offset a5, -40 +; CHECK-RV32-F-NEXT: .cfi_offset a6, -44 +; CHECK-RV32-F-NEXT: .cfi_offset a7, -48 +; CHECK-RV32-F-NEXT: .cfi_offset t3, -52 +; CHECK-RV32-F-NEXT: .cfi_offset t4, -56 +; CHECK-RV32-F-NEXT: .cfi_offset t5, -60 +; CHECK-RV32-F-NEXT: .cfi_offset t6, -64 +; CHECK-RV32-F-NEXT: .cfi_offset ft0, -68 +; CHECK-RV32-F-NEXT: .cfi_offset ft1, -72 +; CHECK-RV32-F-NEXT: .cfi_offset ft2, -76 +; CHECK-RV32-F-NEXT: .cfi_offset ft3, -80 +; CHECK-RV32-F-NEXT: .cfi_offset ft4, -84 +; CHECK-RV32-F-NEXT: .cfi_offset ft5, -88 +; CHECK-RV32-F-NEXT: .cfi_offset ft6, -92 +; CHECK-RV32-F-NEXT: .cfi_offset ft7, -96 +; CHECK-RV32-F-NEXT: .cfi_offset fa0, -100 +; CHECK-RV32-F-NEXT: .cfi_offset fa1, -104 +; CHECK-RV32-F-NEXT: .cfi_offset fa2, -108 +; CHECK-RV32-F-NEXT: .cfi_offset fa3, -112 +; CHECK-RV32-F-NEXT: .cfi_offset fa4, -116 +; CHECK-RV32-F-NEXT: .cfi_offset fa5, -120 +; CHECK-RV32-F-NEXT: .cfi_offset fa6, -124 +; CHECK-RV32-F-NEXT: .cfi_offset fa7, -128 +; CHECK-RV32-F-NEXT: .cfi_offset ft8, -132 +; CHECK-RV32-F-NEXT: .cfi_offset ft9, -136 +; CHECK-RV32-F-NEXT: .cfi_offset ft10, -140 +; CHECK-RV32-F-NEXT: .cfi_offset ft11, -144 +; CHECK-RV32-F-NEXT: .cfi_offset fs0, -148 +; CHECK-RV32-F-NEXT: .cfi_offset fs1, -152 +; CHECK-RV32-F-NEXT: .cfi_offset fs2, -156 +; CHECK-RV32-F-NEXT: .cfi_offset fs3, -160 +; CHECK-RV32-F-NEXT: .cfi_offset fs4, -164 +; CHECK-RV32-F-NEXT: .cfi_offset fs5, -168 +; CHECK-RV32-F-NEXT: .cfi_offset fs6, -172 +; CHECK-RV32-F-NEXT: .cfi_offset fs7, -176 +; CHECK-RV32-F-NEXT: .cfi_offset fs8, -180 +; CHECK-RV32-F-NEXT: .cfi_offset fs9, -184 +; CHECK-RV32-F-NEXT: .cfi_offset fs10, -188 +; CHECK-RV32-F-NEXT: .cfi_offset fs11, -192 ; CHECK-RV32-F-NEXT: call otherfoo ; CHECK-RV32-F-NEXT: flw fs11, 0(sp) ; CHECK-RV32-F-NEXT: flw fs10, 4(sp) @@ -191,11 +261,13 @@ ; CHECK-RV32-F-NEXT: lw t0, 184(sp) ; CHECK-RV32-F-NEXT: lw ra, 188(sp) ; CHECK-RV32-F-NEXT: addi sp, sp, 192 +; CHECK-RV32-F-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV32-F-NEXT: mret ; ; CHECK-RV32-FD-LABEL: foo_with_call: ; CHECK-RV32-FD: # %bb.0: ; CHECK-RV32-FD-NEXT: addi sp, sp, -320 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa_offset 320 ; CHECK-RV32-FD-NEXT: sw ra, 316(sp) ; CHECK-RV32-FD-NEXT: sw t0, 312(sp) ; CHECK-RV32-FD-NEXT: sw t1, 308(sp) @@ -244,6 +316,54 @@ ; CHECK-RV32-FD-NEXT: fsd fs9, 16(sp) ; CHECK-RV32-FD-NEXT: fsd fs10, 8(sp) ; CHECK-RV32-FD-NEXT: fsd fs11, 0(sp) +; CHECK-RV32-FD-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-FD-NEXT: .cfi_offset t0, -8 +; CHECK-RV32-FD-NEXT: .cfi_offset t1, -12 +; CHECK-RV32-FD-NEXT: .cfi_offset t2, -16 +; CHECK-RV32-FD-NEXT: .cfi_offset a0, -20 +; CHECK-RV32-FD-NEXT: .cfi_offset a1, -24 +; CHECK-RV32-FD-NEXT: .cfi_offset a2, -28 +; CHECK-RV32-FD-NEXT: .cfi_offset a3, -32 +; CHECK-RV32-FD-NEXT: .cfi_offset a4, -36 +; CHECK-RV32-FD-NEXT: .cfi_offset a5, -40 +; CHECK-RV32-FD-NEXT: .cfi_offset a6, -44 +; CHECK-RV32-FD-NEXT: .cfi_offset a7, -48 +; CHECK-RV32-FD-NEXT: .cfi_offset t3, -52 +; CHECK-RV32-FD-NEXT: .cfi_offset t4, -56 +; CHECK-RV32-FD-NEXT: .cfi_offset t5, -60 +; CHECK-RV32-FD-NEXT: .cfi_offset t6, -64 +; CHECK-RV32-FD-NEXT: .cfi_offset ft0, -72 +; CHECK-RV32-FD-NEXT: .cfi_offset ft1, -80 +; CHECK-RV32-FD-NEXT: .cfi_offset ft2, -88 +; CHECK-RV32-FD-NEXT: .cfi_offset ft3, -96 +; CHECK-RV32-FD-NEXT: .cfi_offset ft4, -104 +; CHECK-RV32-FD-NEXT: .cfi_offset ft5, -112 +; CHECK-RV32-FD-NEXT: .cfi_offset ft6, -120 +; CHECK-RV32-FD-NEXT: .cfi_offset ft7, -128 +; 
CHECK-RV32-FD-NEXT: .cfi_offset fa0, -136 +; CHECK-RV32-FD-NEXT: .cfi_offset fa1, -144 +; CHECK-RV32-FD-NEXT: .cfi_offset fa2, -152 +; CHECK-RV32-FD-NEXT: .cfi_offset fa3, -160 +; CHECK-RV32-FD-NEXT: .cfi_offset fa4, -168 +; CHECK-RV32-FD-NEXT: .cfi_offset fa5, -176 +; CHECK-RV32-FD-NEXT: .cfi_offset fa6, -184 +; CHECK-RV32-FD-NEXT: .cfi_offset fa7, -192 +; CHECK-RV32-FD-NEXT: .cfi_offset ft8, -200 +; CHECK-RV32-FD-NEXT: .cfi_offset ft9, -208 +; CHECK-RV32-FD-NEXT: .cfi_offset ft10, -216 +; CHECK-RV32-FD-NEXT: .cfi_offset ft11, -224 +; CHECK-RV32-FD-NEXT: .cfi_offset fs0, -232 +; CHECK-RV32-FD-NEXT: .cfi_offset fs1, -240 +; CHECK-RV32-FD-NEXT: .cfi_offset fs2, -248 +; CHECK-RV32-FD-NEXT: .cfi_offset fs3, -256 +; CHECK-RV32-FD-NEXT: .cfi_offset fs4, -264 +; CHECK-RV32-FD-NEXT: .cfi_offset fs5, -272 +; CHECK-RV32-FD-NEXT: .cfi_offset fs6, -280 +; CHECK-RV32-FD-NEXT: .cfi_offset fs7, -288 +; CHECK-RV32-FD-NEXT: .cfi_offset fs8, -296 +; CHECK-RV32-FD-NEXT: .cfi_offset fs9, -304 +; CHECK-RV32-FD-NEXT: .cfi_offset fs10, -312 +; CHECK-RV32-FD-NEXT: .cfi_offset fs11, -320 ; CHECK-RV32-FD-NEXT: call otherfoo ; CHECK-RV32-FD-NEXT: fld fs11, 0(sp) ; CHECK-RV32-FD-NEXT: fld fs10, 8(sp) @@ -294,11 +414,13 @@ ; CHECK-RV32-FD-NEXT: lw t0, 312(sp) ; CHECK-RV32-FD-NEXT: lw ra, 316(sp) ; CHECK-RV32-FD-NEXT: addi sp, sp, 320 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV32-FD-NEXT: mret ; ; CHECK-RV64-LABEL: foo_with_call: ; CHECK-RV64: # %bb.0: ; CHECK-RV64-NEXT: addi sp, sp, -128 +; CHECK-RV64-NEXT: .cfi_def_cfa_offset 128 ; CHECK-RV64-NEXT: sd ra, 120(sp) ; CHECK-RV64-NEXT: sd t0, 112(sp) ; CHECK-RV64-NEXT: sd t1, 104(sp) @@ -315,6 +437,22 @@ ; CHECK-RV64-NEXT: sd t4, 16(sp) ; CHECK-RV64-NEXT: sd t5, 8(sp) ; CHECK-RV64-NEXT: sd t6, 0(sp) +; CHECK-RV64-NEXT: .cfi_offset ra, -8 +; CHECK-RV64-NEXT: .cfi_offset t0, -16 +; CHECK-RV64-NEXT: .cfi_offset t1, -24 +; CHECK-RV64-NEXT: .cfi_offset t2, -32 +; CHECK-RV64-NEXT: .cfi_offset a0, -40 +; CHECK-RV64-NEXT: .cfi_offset a1, -48 +; CHECK-RV64-NEXT: .cfi_offset a2, -56 +; CHECK-RV64-NEXT: .cfi_offset a3, -64 +; CHECK-RV64-NEXT: .cfi_offset a4, -72 +; CHECK-RV64-NEXT: .cfi_offset a5, -80 +; CHECK-RV64-NEXT: .cfi_offset a6, -88 +; CHECK-RV64-NEXT: .cfi_offset a7, -96 +; CHECK-RV64-NEXT: .cfi_offset t3, -104 +; CHECK-RV64-NEXT: .cfi_offset t4, -112 +; CHECK-RV64-NEXT: .cfi_offset t5, -120 +; CHECK-RV64-NEXT: .cfi_offset t6, -128 ; CHECK-RV64-NEXT: call otherfoo ; CHECK-RV64-NEXT: ld t6, 0(sp) ; CHECK-RV64-NEXT: ld t5, 8(sp) @@ -333,11 +471,13 @@ ; CHECK-RV64-NEXT: ld t0, 112(sp) ; CHECK-RV64-NEXT: ld ra, 120(sp) ; CHECK-RV64-NEXT: addi sp, sp, 128 +; CHECK-RV64-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV64-NEXT: mret ; ; CHECK-RV64-F-LABEL: foo_with_call: ; CHECK-RV64-F: # %bb.0: ; CHECK-RV64-F-NEXT: addi sp, sp, -256 +; CHECK-RV64-F-NEXT: .cfi_def_cfa_offset 256 ; CHECK-RV64-F-NEXT: sd ra, 248(sp) ; CHECK-RV64-F-NEXT: sd t0, 240(sp) ; CHECK-RV64-F-NEXT: sd t1, 232(sp) @@ -386,6 +526,54 @@ ; CHECK-RV64-F-NEXT: fsw fs9, 8(sp) ; CHECK-RV64-F-NEXT: fsw fs10, 4(sp) ; CHECK-RV64-F-NEXT: fsw fs11, 0(sp) +; CHECK-RV64-F-NEXT: .cfi_offset ra, -8 +; CHECK-RV64-F-NEXT: .cfi_offset t0, -16 +; CHECK-RV64-F-NEXT: .cfi_offset t1, -24 +; CHECK-RV64-F-NEXT: .cfi_offset t2, -32 +; CHECK-RV64-F-NEXT: .cfi_offset a0, -40 +; CHECK-RV64-F-NEXT: .cfi_offset a1, -48 +; CHECK-RV64-F-NEXT: .cfi_offset a2, -56 +; CHECK-RV64-F-NEXT: .cfi_offset a3, -64 +; CHECK-RV64-F-NEXT: .cfi_offset a4, -72 +; CHECK-RV64-F-NEXT: .cfi_offset a5, -80 +; CHECK-RV64-F-NEXT: .cfi_offset a6, -88 +; 
CHECK-RV64-F-NEXT: .cfi_offset a7, -96 +; CHECK-RV64-F-NEXT: .cfi_offset t3, -104 +; CHECK-RV64-F-NEXT: .cfi_offset t4, -112 +; CHECK-RV64-F-NEXT: .cfi_offset t5, -120 +; CHECK-RV64-F-NEXT: .cfi_offset t6, -128 +; CHECK-RV64-F-NEXT: .cfi_offset ft0, -132 +; CHECK-RV64-F-NEXT: .cfi_offset ft1, -136 +; CHECK-RV64-F-NEXT: .cfi_offset ft2, -140 +; CHECK-RV64-F-NEXT: .cfi_offset ft3, -144 +; CHECK-RV64-F-NEXT: .cfi_offset ft4, -148 +; CHECK-RV64-F-NEXT: .cfi_offset ft5, -152 +; CHECK-RV64-F-NEXT: .cfi_offset ft6, -156 +; CHECK-RV64-F-NEXT: .cfi_offset ft7, -160 +; CHECK-RV64-F-NEXT: .cfi_offset fa0, -164 +; CHECK-RV64-F-NEXT: .cfi_offset fa1, -168 +; CHECK-RV64-F-NEXT: .cfi_offset fa2, -172 +; CHECK-RV64-F-NEXT: .cfi_offset fa3, -176 +; CHECK-RV64-F-NEXT: .cfi_offset fa4, -180 +; CHECK-RV64-F-NEXT: .cfi_offset fa5, -184 +; CHECK-RV64-F-NEXT: .cfi_offset fa6, -188 +; CHECK-RV64-F-NEXT: .cfi_offset fa7, -192 +; CHECK-RV64-F-NEXT: .cfi_offset ft8, -196 +; CHECK-RV64-F-NEXT: .cfi_offset ft9, -200 +; CHECK-RV64-F-NEXT: .cfi_offset ft10, -204 +; CHECK-RV64-F-NEXT: .cfi_offset ft11, -208 +; CHECK-RV64-F-NEXT: .cfi_offset fs0, -212 +; CHECK-RV64-F-NEXT: .cfi_offset fs1, -216 +; CHECK-RV64-F-NEXT: .cfi_offset fs2, -220 +; CHECK-RV64-F-NEXT: .cfi_offset fs3, -224 +; CHECK-RV64-F-NEXT: .cfi_offset fs4, -228 +; CHECK-RV64-F-NEXT: .cfi_offset fs5, -232 +; CHECK-RV64-F-NEXT: .cfi_offset fs6, -236 +; CHECK-RV64-F-NEXT: .cfi_offset fs7, -240 +; CHECK-RV64-F-NEXT: .cfi_offset fs8, -244 +; CHECK-RV64-F-NEXT: .cfi_offset fs9, -248 +; CHECK-RV64-F-NEXT: .cfi_offset fs10, -252 +; CHECK-RV64-F-NEXT: .cfi_offset fs11, -256 ; CHECK-RV64-F-NEXT: call otherfoo ; CHECK-RV64-F-NEXT: flw fs11, 0(sp) ; CHECK-RV64-F-NEXT: flw fs10, 4(sp) @@ -436,11 +624,13 @@ ; CHECK-RV64-F-NEXT: ld t0, 240(sp) ; CHECK-RV64-F-NEXT: ld ra, 248(sp) ; CHECK-RV64-F-NEXT: addi sp, sp, 256 +; CHECK-RV64-F-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV64-F-NEXT: mret ; ; CHECK-RV64-FD-LABEL: foo_with_call: ; CHECK-RV64-FD: # %bb.0: ; CHECK-RV64-FD-NEXT: addi sp, sp, -384 +; CHECK-RV64-FD-NEXT: .cfi_def_cfa_offset 384 ; CHECK-RV64-FD-NEXT: sd ra, 376(sp) ; CHECK-RV64-FD-NEXT: sd t0, 368(sp) ; CHECK-RV64-FD-NEXT: sd t1, 360(sp) @@ -489,6 +679,54 @@ ; CHECK-RV64-FD-NEXT: fsd fs9, 16(sp) ; CHECK-RV64-FD-NEXT: fsd fs10, 8(sp) ; CHECK-RV64-FD-NEXT: fsd fs11, 0(sp) +; CHECK-RV64-FD-NEXT: .cfi_offset ra, -8 +; CHECK-RV64-FD-NEXT: .cfi_offset t0, -16 +; CHECK-RV64-FD-NEXT: .cfi_offset t1, -24 +; CHECK-RV64-FD-NEXT: .cfi_offset t2, -32 +; CHECK-RV64-FD-NEXT: .cfi_offset a0, -40 +; CHECK-RV64-FD-NEXT: .cfi_offset a1, -48 +; CHECK-RV64-FD-NEXT: .cfi_offset a2, -56 +; CHECK-RV64-FD-NEXT: .cfi_offset a3, -64 +; CHECK-RV64-FD-NEXT: .cfi_offset a4, -72 +; CHECK-RV64-FD-NEXT: .cfi_offset a5, -80 +; CHECK-RV64-FD-NEXT: .cfi_offset a6, -88 +; CHECK-RV64-FD-NEXT: .cfi_offset a7, -96 +; CHECK-RV64-FD-NEXT: .cfi_offset t3, -104 +; CHECK-RV64-FD-NEXT: .cfi_offset t4, -112 +; CHECK-RV64-FD-NEXT: .cfi_offset t5, -120 +; CHECK-RV64-FD-NEXT: .cfi_offset t6, -128 +; CHECK-RV64-FD-NEXT: .cfi_offset ft0, -136 +; CHECK-RV64-FD-NEXT: .cfi_offset ft1, -144 +; CHECK-RV64-FD-NEXT: .cfi_offset ft2, -152 +; CHECK-RV64-FD-NEXT: .cfi_offset ft3, -160 +; CHECK-RV64-FD-NEXT: .cfi_offset ft4, -168 +; CHECK-RV64-FD-NEXT: .cfi_offset ft5, -176 +; CHECK-RV64-FD-NEXT: .cfi_offset ft6, -184 +; CHECK-RV64-FD-NEXT: .cfi_offset ft7, -192 +; CHECK-RV64-FD-NEXT: .cfi_offset fa0, -200 +; CHECK-RV64-FD-NEXT: .cfi_offset fa1, -208 +; CHECK-RV64-FD-NEXT: .cfi_offset fa2, -216 +; CHECK-RV64-FD-NEXT: .cfi_offset 
fa3, -224 +; CHECK-RV64-FD-NEXT: .cfi_offset fa4, -232 +; CHECK-RV64-FD-NEXT: .cfi_offset fa5, -240 +; CHECK-RV64-FD-NEXT: .cfi_offset fa6, -248 +; CHECK-RV64-FD-NEXT: .cfi_offset fa7, -256 +; CHECK-RV64-FD-NEXT: .cfi_offset ft8, -264 +; CHECK-RV64-FD-NEXT: .cfi_offset ft9, -272 +; CHECK-RV64-FD-NEXT: .cfi_offset ft10, -280 +; CHECK-RV64-FD-NEXT: .cfi_offset ft11, -288 +; CHECK-RV64-FD-NEXT: .cfi_offset fs0, -296 +; CHECK-RV64-FD-NEXT: .cfi_offset fs1, -304 +; CHECK-RV64-FD-NEXT: .cfi_offset fs2, -312 +; CHECK-RV64-FD-NEXT: .cfi_offset fs3, -320 +; CHECK-RV64-FD-NEXT: .cfi_offset fs4, -328 +; CHECK-RV64-FD-NEXT: .cfi_offset fs5, -336 +; CHECK-RV64-FD-NEXT: .cfi_offset fs6, -344 +; CHECK-RV64-FD-NEXT: .cfi_offset fs7, -352 +; CHECK-RV64-FD-NEXT: .cfi_offset fs8, -360 +; CHECK-RV64-FD-NEXT: .cfi_offset fs9, -368 +; CHECK-RV64-FD-NEXT: .cfi_offset fs10, -376 +; CHECK-RV64-FD-NEXT: .cfi_offset fs11, -384 ; CHECK-RV64-FD-NEXT: call otherfoo ; CHECK-RV64-FD-NEXT: fld fs11, 0(sp) ; CHECK-RV64-FD-NEXT: fld fs10, 8(sp) @@ -539,6 +777,7 @@ ; CHECK-RV64-FD-NEXT: ld t0, 368(sp) ; CHECK-RV64-FD-NEXT: ld ra, 376(sp) ; CHECK-RV64-FD-NEXT: addi sp, sp, 384 +; CHECK-RV64-FD-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV64-FD-NEXT: mret %call = call i32 bitcast (i32 (...)* @otherfoo to i32 ()*)() ret void @@ -552,6 +791,7 @@ ; CHECK-RV32-LABEL: foo_fp_with_call: ; CHECK-RV32: # %bb.0: ; CHECK-RV32-NEXT: addi sp, sp, -80 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 80 ; CHECK-RV32-NEXT: sw ra, 76(sp) ; CHECK-RV32-NEXT: sw t0, 72(sp) ; CHECK-RV32-NEXT: sw t1, 68(sp) @@ -569,8 +809,27 @@ ; CHECK-RV32-NEXT: sw t4, 20(sp) ; CHECK-RV32-NEXT: sw t5, 16(sp) ; CHECK-RV32-NEXT: sw t6, 12(sp) +; CHECK-RV32-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-NEXT: .cfi_offset t0, -8 +; CHECK-RV32-NEXT: .cfi_offset t1, -12 +; CHECK-RV32-NEXT: .cfi_offset t2, -16 +; CHECK-RV32-NEXT: .cfi_offset s0, -20 +; CHECK-RV32-NEXT: .cfi_offset a0, -24 +; CHECK-RV32-NEXT: .cfi_offset a1, -28 +; CHECK-RV32-NEXT: .cfi_offset a2, -32 +; CHECK-RV32-NEXT: .cfi_offset a3, -36 +; CHECK-RV32-NEXT: .cfi_offset a4, -40 +; CHECK-RV32-NEXT: .cfi_offset a5, -44 +; CHECK-RV32-NEXT: .cfi_offset a6, -48 +; CHECK-RV32-NEXT: .cfi_offset a7, -52 +; CHECK-RV32-NEXT: .cfi_offset t3, -56 +; CHECK-RV32-NEXT: .cfi_offset t4, -60 +; CHECK-RV32-NEXT: .cfi_offset t5, -64 +; CHECK-RV32-NEXT: .cfi_offset t6, -68 ; CHECK-RV32-NEXT: addi s0, sp, 80 +; CHECK-RV32-NEXT: .cfi_def_cfa s0, 0 ; CHECK-RV32-NEXT: call otherfoo +; CHECK-RV32-NEXT: .cfi_def_cfa sp, 80 ; CHECK-RV32-NEXT: lw t6, 12(sp) ; CHECK-RV32-NEXT: lw t5, 16(sp) ; CHECK-RV32-NEXT: lw t4, 20(sp) @@ -589,11 +848,13 @@ ; CHECK-RV32-NEXT: lw t0, 72(sp) ; CHECK-RV32-NEXT: lw ra, 76(sp) ; CHECK-RV32-NEXT: addi sp, sp, 80 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV32-NEXT: mret ; ; CHECK-RV32-F-LABEL: foo_fp_with_call: ; CHECK-RV32-F: # %bb.0: ; CHECK-RV32-F-NEXT: addi sp, sp, -208 +; CHECK-RV32-F-NEXT: .cfi_def_cfa_offset 208 ; CHECK-RV32-F-NEXT: sw ra, 204(sp) ; CHECK-RV32-F-NEXT: sw t0, 200(sp) ; CHECK-RV32-F-NEXT: sw t1, 196(sp) @@ -643,8 +904,59 @@ ; CHECK-RV32-F-NEXT: fsw fs9, 20(sp) ; CHECK-RV32-F-NEXT: fsw fs10, 16(sp) ; CHECK-RV32-F-NEXT: fsw fs11, 12(sp) +; CHECK-RV32-F-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-F-NEXT: .cfi_offset t0, -8 +; CHECK-RV32-F-NEXT: .cfi_offset t1, -12 +; CHECK-RV32-F-NEXT: .cfi_offset t2, -16 +; CHECK-RV32-F-NEXT: .cfi_offset s0, -20 +; CHECK-RV32-F-NEXT: .cfi_offset a0, -24 +; CHECK-RV32-F-NEXT: .cfi_offset a1, -28 +; CHECK-RV32-F-NEXT: .cfi_offset a2, -32 +; CHECK-RV32-F-NEXT: .cfi_offset 
a3, -36 +; CHECK-RV32-F-NEXT: .cfi_offset a4, -40 +; CHECK-RV32-F-NEXT: .cfi_offset a5, -44 +; CHECK-RV32-F-NEXT: .cfi_offset a6, -48 +; CHECK-RV32-F-NEXT: .cfi_offset a7, -52 +; CHECK-RV32-F-NEXT: .cfi_offset t3, -56 +; CHECK-RV32-F-NEXT: .cfi_offset t4, -60 +; CHECK-RV32-F-NEXT: .cfi_offset t5, -64 +; CHECK-RV32-F-NEXT: .cfi_offset t6, -68 +; CHECK-RV32-F-NEXT: .cfi_offset ft0, -72 +; CHECK-RV32-F-NEXT: .cfi_offset ft1, -76 +; CHECK-RV32-F-NEXT: .cfi_offset ft2, -80 +; CHECK-RV32-F-NEXT: .cfi_offset ft3, -84 +; CHECK-RV32-F-NEXT: .cfi_offset ft4, -88 +; CHECK-RV32-F-NEXT: .cfi_offset ft5, -92 +; CHECK-RV32-F-NEXT: .cfi_offset ft6, -96 +; CHECK-RV32-F-NEXT: .cfi_offset ft7, -100 +; CHECK-RV32-F-NEXT: .cfi_offset fa0, -104 +; CHECK-RV32-F-NEXT: .cfi_offset fa1, -108 +; CHECK-RV32-F-NEXT: .cfi_offset fa2, -112 +; CHECK-RV32-F-NEXT: .cfi_offset fa3, -116 +; CHECK-RV32-F-NEXT: .cfi_offset fa4, -120 +; CHECK-RV32-F-NEXT: .cfi_offset fa5, -124 +; CHECK-RV32-F-NEXT: .cfi_offset fa6, -128 +; CHECK-RV32-F-NEXT: .cfi_offset fa7, -132 +; CHECK-RV32-F-NEXT: .cfi_offset ft8, -136 +; CHECK-RV32-F-NEXT: .cfi_offset ft9, -140 +; CHECK-RV32-F-NEXT: .cfi_offset ft10, -144 +; CHECK-RV32-F-NEXT: .cfi_offset ft11, -148 +; CHECK-RV32-F-NEXT: .cfi_offset fs0, -152 +; CHECK-RV32-F-NEXT: .cfi_offset fs1, -156 +; CHECK-RV32-F-NEXT: .cfi_offset fs2, -160 +; CHECK-RV32-F-NEXT: .cfi_offset fs3, -164 +; CHECK-RV32-F-NEXT: .cfi_offset fs4, -168 +; CHECK-RV32-F-NEXT: .cfi_offset fs5, -172 +; CHECK-RV32-F-NEXT: .cfi_offset fs6, -176 +; CHECK-RV32-F-NEXT: .cfi_offset fs7, -180 +; CHECK-RV32-F-NEXT: .cfi_offset fs8, -184 +; CHECK-RV32-F-NEXT: .cfi_offset fs9, -188 +; CHECK-RV32-F-NEXT: .cfi_offset fs10, -192 +; CHECK-RV32-F-NEXT: .cfi_offset fs11, -196 ; CHECK-RV32-F-NEXT: addi s0, sp, 208 +; CHECK-RV32-F-NEXT: .cfi_def_cfa s0, 0 ; CHECK-RV32-F-NEXT: call otherfoo +; CHECK-RV32-F-NEXT: .cfi_def_cfa sp, 208 ; CHECK-RV32-F-NEXT: flw fs11, 12(sp) ; CHECK-RV32-F-NEXT: flw fs10, 16(sp) ; CHECK-RV32-F-NEXT: flw fs9, 20(sp) @@ -695,11 +1007,13 @@ ; CHECK-RV32-F-NEXT: lw t0, 200(sp) ; CHECK-RV32-F-NEXT: lw ra, 204(sp) ; CHECK-RV32-F-NEXT: addi sp, sp, 208 +; CHECK-RV32-F-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV32-F-NEXT: mret ; ; CHECK-RV32-FD-LABEL: foo_fp_with_call: ; CHECK-RV32-FD: # %bb.0: ; CHECK-RV32-FD-NEXT: addi sp, sp, -336 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa_offset 336 ; CHECK-RV32-FD-NEXT: sw ra, 332(sp) ; CHECK-RV32-FD-NEXT: sw t0, 328(sp) ; CHECK-RV32-FD-NEXT: sw t1, 324(sp) @@ -749,8 +1063,59 @@ ; CHECK-RV32-FD-NEXT: fsd fs9, 24(sp) ; CHECK-RV32-FD-NEXT: fsd fs10, 16(sp) ; CHECK-RV32-FD-NEXT: fsd fs11, 8(sp) +; CHECK-RV32-FD-NEXT: .cfi_offset ra, -4 +; CHECK-RV32-FD-NEXT: .cfi_offset t0, -8 +; CHECK-RV32-FD-NEXT: .cfi_offset t1, -12 +; CHECK-RV32-FD-NEXT: .cfi_offset t2, -16 +; CHECK-RV32-FD-NEXT: .cfi_offset s0, -20 +; CHECK-RV32-FD-NEXT: .cfi_offset a0, -24 +; CHECK-RV32-FD-NEXT: .cfi_offset a1, -28 +; CHECK-RV32-FD-NEXT: .cfi_offset a2, -32 +; CHECK-RV32-FD-NEXT: .cfi_offset a3, -36 +; CHECK-RV32-FD-NEXT: .cfi_offset a4, -40 +; CHECK-RV32-FD-NEXT: .cfi_offset a5, -44 +; CHECK-RV32-FD-NEXT: .cfi_offset a6, -48 +; CHECK-RV32-FD-NEXT: .cfi_offset a7, -52 +; CHECK-RV32-FD-NEXT: .cfi_offset t3, -56 +; CHECK-RV32-FD-NEXT: .cfi_offset t4, -60 +; CHECK-RV32-FD-NEXT: .cfi_offset t5, -64 +; CHECK-RV32-FD-NEXT: .cfi_offset t6, -68 +; CHECK-RV32-FD-NEXT: .cfi_offset ft0, -80 +; CHECK-RV32-FD-NEXT: .cfi_offset ft1, -88 +; CHECK-RV32-FD-NEXT: .cfi_offset ft2, -96 +; CHECK-RV32-FD-NEXT: .cfi_offset ft3, -104 +; CHECK-RV32-FD-NEXT: 
.cfi_offset ft4, -112 +; CHECK-RV32-FD-NEXT: .cfi_offset ft5, -120 +; CHECK-RV32-FD-NEXT: .cfi_offset ft6, -128 +; CHECK-RV32-FD-NEXT: .cfi_offset ft7, -136 +; CHECK-RV32-FD-NEXT: .cfi_offset fa0, -144 +; CHECK-RV32-FD-NEXT: .cfi_offset fa1, -152 +; CHECK-RV32-FD-NEXT: .cfi_offset fa2, -160 +; CHECK-RV32-FD-NEXT: .cfi_offset fa3, -168 +; CHECK-RV32-FD-NEXT: .cfi_offset fa4, -176 +; CHECK-RV32-FD-NEXT: .cfi_offset fa5, -184 +; CHECK-RV32-FD-NEXT: .cfi_offset fa6, -192 +; CHECK-RV32-FD-NEXT: .cfi_offset fa7, -200 +; CHECK-RV32-FD-NEXT: .cfi_offset ft8, -208 +; CHECK-RV32-FD-NEXT: .cfi_offset ft9, -216 +; CHECK-RV32-FD-NEXT: .cfi_offset ft10, -224 +; CHECK-RV32-FD-NEXT: .cfi_offset ft11, -232 +; CHECK-RV32-FD-NEXT: .cfi_offset fs0, -240 +; CHECK-RV32-FD-NEXT: .cfi_offset fs1, -248 +; CHECK-RV32-FD-NEXT: .cfi_offset fs2, -256 +; CHECK-RV32-FD-NEXT: .cfi_offset fs3, -264 +; CHECK-RV32-FD-NEXT: .cfi_offset fs4, -272 +; CHECK-RV32-FD-NEXT: .cfi_offset fs5, -280 +; CHECK-RV32-FD-NEXT: .cfi_offset fs6, -288 +; CHECK-RV32-FD-NEXT: .cfi_offset fs7, -296 +; CHECK-RV32-FD-NEXT: .cfi_offset fs8, -304 +; CHECK-RV32-FD-NEXT: .cfi_offset fs9, -312 +; CHECK-RV32-FD-NEXT: .cfi_offset fs10, -320 +; CHECK-RV32-FD-NEXT: .cfi_offset fs11, -328 ; CHECK-RV32-FD-NEXT: addi s0, sp, 336 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa s0, 0 ; CHECK-RV32-FD-NEXT: call otherfoo +; CHECK-RV32-FD-NEXT: .cfi_def_cfa sp, 336 ; CHECK-RV32-FD-NEXT: fld fs11, 8(sp) ; CHECK-RV32-FD-NEXT: fld fs10, 16(sp) ; CHECK-RV32-FD-NEXT: fld fs9, 24(sp) @@ -801,11 +1166,13 @@ ; CHECK-RV32-FD-NEXT: lw t0, 328(sp) ; CHECK-RV32-FD-NEXT: lw ra, 332(sp) ; CHECK-RV32-FD-NEXT: addi sp, sp, 336 +; CHECK-RV32-FD-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV32-FD-NEXT: mret ; ; CHECK-RV64-LABEL: foo_fp_with_call: ; CHECK-RV64: # %bb.0: ; CHECK-RV64-NEXT: addi sp, sp, -144 +; CHECK-RV64-NEXT: .cfi_def_cfa_offset 144 ; CHECK-RV64-NEXT: sd ra, 136(sp) ; CHECK-RV64-NEXT: sd t0, 128(sp) ; CHECK-RV64-NEXT: sd t1, 120(sp) @@ -823,8 +1190,27 @@ ; CHECK-RV64-NEXT: sd t4, 24(sp) ; CHECK-RV64-NEXT: sd t5, 16(sp) ; CHECK-RV64-NEXT: sd t6, 8(sp) +; CHECK-RV64-NEXT: .cfi_offset ra, -8 +; CHECK-RV64-NEXT: .cfi_offset t0, -16 +; CHECK-RV64-NEXT: .cfi_offset t1, -24 +; CHECK-RV64-NEXT: .cfi_offset t2, -32 +; CHECK-RV64-NEXT: .cfi_offset s0, -40 +; CHECK-RV64-NEXT: .cfi_offset a0, -48 +; CHECK-RV64-NEXT: .cfi_offset a1, -56 +; CHECK-RV64-NEXT: .cfi_offset a2, -64 +; CHECK-RV64-NEXT: .cfi_offset a3, -72 +; CHECK-RV64-NEXT: .cfi_offset a4, -80 +; CHECK-RV64-NEXT: .cfi_offset a5, -88 +; CHECK-RV64-NEXT: .cfi_offset a6, -96 +; CHECK-RV64-NEXT: .cfi_offset a7, -104 +; CHECK-RV64-NEXT: .cfi_offset t3, -112 +; CHECK-RV64-NEXT: .cfi_offset t4, -120 +; CHECK-RV64-NEXT: .cfi_offset t5, -128 +; CHECK-RV64-NEXT: .cfi_offset t6, -136 ; CHECK-RV64-NEXT: addi s0, sp, 144 +; CHECK-RV64-NEXT: .cfi_def_cfa s0, 0 ; CHECK-RV64-NEXT: call otherfoo +; CHECK-RV64-NEXT: .cfi_def_cfa sp, 144 ; CHECK-RV64-NEXT: ld t6, 8(sp) ; CHECK-RV64-NEXT: ld t5, 16(sp) ; CHECK-RV64-NEXT: ld t4, 24(sp) @@ -843,11 +1229,13 @@ ; CHECK-RV64-NEXT: ld t0, 128(sp) ; CHECK-RV64-NEXT: ld ra, 136(sp) ; CHECK-RV64-NEXT: addi sp, sp, 144 +; CHECK-RV64-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV64-NEXT: mret ; ; CHECK-RV64-F-LABEL: foo_fp_with_call: ; CHECK-RV64-F: # %bb.0: ; CHECK-RV64-F-NEXT: addi sp, sp, -272 +; CHECK-RV64-F-NEXT: .cfi_def_cfa_offset 272 ; CHECK-RV64-F-NEXT: sd ra, 264(sp) ; CHECK-RV64-F-NEXT: sd t0, 256(sp) ; CHECK-RV64-F-NEXT: sd t1, 248(sp) @@ -897,8 +1285,59 @@ ; CHECK-RV64-F-NEXT: fsw fs9, 16(sp) ; CHECK-RV64-F-NEXT: 
fsw fs10, 12(sp) ; CHECK-RV64-F-NEXT: fsw fs11, 8(sp) +; CHECK-RV64-F-NEXT: .cfi_offset ra, -8 +; CHECK-RV64-F-NEXT: .cfi_offset t0, -16 +; CHECK-RV64-F-NEXT: .cfi_offset t1, -24 +; CHECK-RV64-F-NEXT: .cfi_offset t2, -32 +; CHECK-RV64-F-NEXT: .cfi_offset s0, -40 +; CHECK-RV64-F-NEXT: .cfi_offset a0, -48 +; CHECK-RV64-F-NEXT: .cfi_offset a1, -56 +; CHECK-RV64-F-NEXT: .cfi_offset a2, -64 +; CHECK-RV64-F-NEXT: .cfi_offset a3, -72 +; CHECK-RV64-F-NEXT: .cfi_offset a4, -80 +; CHECK-RV64-F-NEXT: .cfi_offset a5, -88 +; CHECK-RV64-F-NEXT: .cfi_offset a6, -96 +; CHECK-RV64-F-NEXT: .cfi_offset a7, -104 +; CHECK-RV64-F-NEXT: .cfi_offset t3, -112 +; CHECK-RV64-F-NEXT: .cfi_offset t4, -120 +; CHECK-RV64-F-NEXT: .cfi_offset t5, -128 +; CHECK-RV64-F-NEXT: .cfi_offset t6, -136 +; CHECK-RV64-F-NEXT: .cfi_offset ft0, -140 +; CHECK-RV64-F-NEXT: .cfi_offset ft1, -144 +; CHECK-RV64-F-NEXT: .cfi_offset ft2, -148 +; CHECK-RV64-F-NEXT: .cfi_offset ft3, -152 +; CHECK-RV64-F-NEXT: .cfi_offset ft4, -156 +; CHECK-RV64-F-NEXT: .cfi_offset ft5, -160 +; CHECK-RV64-F-NEXT: .cfi_offset ft6, -164 +; CHECK-RV64-F-NEXT: .cfi_offset ft7, -168 +; CHECK-RV64-F-NEXT: .cfi_offset fa0, -172 +; CHECK-RV64-F-NEXT: .cfi_offset fa1, -176 +; CHECK-RV64-F-NEXT: .cfi_offset fa2, -180 +; CHECK-RV64-F-NEXT: .cfi_offset fa3, -184 +; CHECK-RV64-F-NEXT: .cfi_offset fa4, -188 +; CHECK-RV64-F-NEXT: .cfi_offset fa5, -192 +; CHECK-RV64-F-NEXT: .cfi_offset fa6, -196 +; CHECK-RV64-F-NEXT: .cfi_offset fa7, -200 +; CHECK-RV64-F-NEXT: .cfi_offset ft8, -204 +; CHECK-RV64-F-NEXT: .cfi_offset ft9, -208 +; CHECK-RV64-F-NEXT: .cfi_offset ft10, -212 +; CHECK-RV64-F-NEXT: .cfi_offset ft11, -216 +; CHECK-RV64-F-NEXT: .cfi_offset fs0, -220 +; CHECK-RV64-F-NEXT: .cfi_offset fs1, -224 +; CHECK-RV64-F-NEXT: .cfi_offset fs2, -228 +; CHECK-RV64-F-NEXT: .cfi_offset fs3, -232 +; CHECK-RV64-F-NEXT: .cfi_offset fs4, -236 +; CHECK-RV64-F-NEXT: .cfi_offset fs5, -240 +; CHECK-RV64-F-NEXT: .cfi_offset fs6, -244 +; CHECK-RV64-F-NEXT: .cfi_offset fs7, -248 +; CHECK-RV64-F-NEXT: .cfi_offset fs8, -252 +; CHECK-RV64-F-NEXT: .cfi_offset fs9, -256 +; CHECK-RV64-F-NEXT: .cfi_offset fs10, -260 +; CHECK-RV64-F-NEXT: .cfi_offset fs11, -264 ; CHECK-RV64-F-NEXT: addi s0, sp, 272 +; CHECK-RV64-F-NEXT: .cfi_def_cfa s0, 0 ; CHECK-RV64-F-NEXT: call otherfoo +; CHECK-RV64-F-NEXT: .cfi_def_cfa sp, 272 ; CHECK-RV64-F-NEXT: flw fs11, 8(sp) ; CHECK-RV64-F-NEXT: flw fs10, 12(sp) ; CHECK-RV64-F-NEXT: flw fs9, 16(sp) @@ -949,11 +1388,13 @@ ; CHECK-RV64-F-NEXT: ld t0, 256(sp) ; CHECK-RV64-F-NEXT: ld ra, 264(sp) ; CHECK-RV64-F-NEXT: addi sp, sp, 272 +; CHECK-RV64-F-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV64-F-NEXT: mret ; ; CHECK-RV64-FD-LABEL: foo_fp_with_call: ; CHECK-RV64-FD: # %bb.0: ; CHECK-RV64-FD-NEXT: addi sp, sp, -400 +; CHECK-RV64-FD-NEXT: .cfi_def_cfa_offset 400 ; CHECK-RV64-FD-NEXT: sd ra, 392(sp) ; CHECK-RV64-FD-NEXT: sd t0, 384(sp) ; CHECK-RV64-FD-NEXT: sd t1, 376(sp) @@ -1003,8 +1444,59 @@ ; CHECK-RV64-FD-NEXT: fsd fs9, 24(sp) ; CHECK-RV64-FD-NEXT: fsd fs10, 16(sp) ; CHECK-RV64-FD-NEXT: fsd fs11, 8(sp) +; CHECK-RV64-FD-NEXT: .cfi_offset ra, -8 +; CHECK-RV64-FD-NEXT: .cfi_offset t0, -16 +; CHECK-RV64-FD-NEXT: .cfi_offset t1, -24 +; CHECK-RV64-FD-NEXT: .cfi_offset t2, -32 +; CHECK-RV64-FD-NEXT: .cfi_offset s0, -40 +; CHECK-RV64-FD-NEXT: .cfi_offset a0, -48 +; CHECK-RV64-FD-NEXT: .cfi_offset a1, -56 +; CHECK-RV64-FD-NEXT: .cfi_offset a2, -64 +; CHECK-RV64-FD-NEXT: .cfi_offset a3, -72 +; CHECK-RV64-FD-NEXT: .cfi_offset a4, -80 +; CHECK-RV64-FD-NEXT: .cfi_offset a5, -88 +; 
CHECK-RV64-FD-NEXT: .cfi_offset a6, -96 +; CHECK-RV64-FD-NEXT: .cfi_offset a7, -104 +; CHECK-RV64-FD-NEXT: .cfi_offset t3, -112 +; CHECK-RV64-FD-NEXT: .cfi_offset t4, -120 +; CHECK-RV64-FD-NEXT: .cfi_offset t5, -128 +; CHECK-RV64-FD-NEXT: .cfi_offset t6, -136 +; CHECK-RV64-FD-NEXT: .cfi_offset ft0, -144 +; CHECK-RV64-FD-NEXT: .cfi_offset ft1, -152 +; CHECK-RV64-FD-NEXT: .cfi_offset ft2, -160 +; CHECK-RV64-FD-NEXT: .cfi_offset ft3, -168 +; CHECK-RV64-FD-NEXT: .cfi_offset ft4, -176 +; CHECK-RV64-FD-NEXT: .cfi_offset ft5, -184 +; CHECK-RV64-FD-NEXT: .cfi_offset ft6, -192 +; CHECK-RV64-FD-NEXT: .cfi_offset ft7, -200 +; CHECK-RV64-FD-NEXT: .cfi_offset fa0, -208 +; CHECK-RV64-FD-NEXT: .cfi_offset fa1, -216 +; CHECK-RV64-FD-NEXT: .cfi_offset fa2, -224 +; CHECK-RV64-FD-NEXT: .cfi_offset fa3, -232 +; CHECK-RV64-FD-NEXT: .cfi_offset fa4, -240 +; CHECK-RV64-FD-NEXT: .cfi_offset fa5, -248 +; CHECK-RV64-FD-NEXT: .cfi_offset fa6, -256 +; CHECK-RV64-FD-NEXT: .cfi_offset fa7, -264 +; CHECK-RV64-FD-NEXT: .cfi_offset ft8, -272 +; CHECK-RV64-FD-NEXT: .cfi_offset ft9, -280 +; CHECK-RV64-FD-NEXT: .cfi_offset ft10, -288 +; CHECK-RV64-FD-NEXT: .cfi_offset ft11, -296 +; CHECK-RV64-FD-NEXT: .cfi_offset fs0, -304 +; CHECK-RV64-FD-NEXT: .cfi_offset fs1, -312 +; CHECK-RV64-FD-NEXT: .cfi_offset fs2, -320 +; CHECK-RV64-FD-NEXT: .cfi_offset fs3, -328 +; CHECK-RV64-FD-NEXT: .cfi_offset fs4, -336 +; CHECK-RV64-FD-NEXT: .cfi_offset fs5, -344 +; CHECK-RV64-FD-NEXT: .cfi_offset fs6, -352 +; CHECK-RV64-FD-NEXT: .cfi_offset fs7, -360 +; CHECK-RV64-FD-NEXT: .cfi_offset fs8, -368 +; CHECK-RV64-FD-NEXT: .cfi_offset fs9, -376 +; CHECK-RV64-FD-NEXT: .cfi_offset fs10, -384 +; CHECK-RV64-FD-NEXT: .cfi_offset fs11, -392 ; CHECK-RV64-FD-NEXT: addi s0, sp, 400 +; CHECK-RV64-FD-NEXT: .cfi_def_cfa s0, 0 ; CHECK-RV64-FD-NEXT: call otherfoo +; CHECK-RV64-FD-NEXT: .cfi_def_cfa sp, 400 ; CHECK-RV64-FD-NEXT: fld fs11, 8(sp) ; CHECK-RV64-FD-NEXT: fld fs10, 16(sp) ; CHECK-RV64-FD-NEXT: fld fs9, 24(sp) @@ -1055,6 +1547,7 @@ ; CHECK-RV64-FD-NEXT: ld t0, 384(sp) ; CHECK-RV64-FD-NEXT: ld ra, 392(sp) ; CHECK-RV64-FD-NEXT: addi sp, sp, 400 +; CHECK-RV64-FD-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV64-FD-NEXT: mret %call = call i32 bitcast (i32 (...)* @otherfoo to i32 ()*)() ret void Index: test/CodeGen/RISCV/jumptable.ll =================================================================== --- test/CodeGen/RISCV/jumptable.ll +++ test/CodeGen/RISCV/jumptable.ll @@ -33,6 +33,7 @@ ; RV32I-NEXT: .LBB0_9: # %exit ; RV32I-NEXT: sw a0, 0(a1) ; RV32I-NEXT: .LBB0_10: # %exit +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret entry: switch i32 %in, label %exit [ Index: test/CodeGen/RISCV/legalize-fneg.ll =================================================================== --- test/CodeGen/RISCV/legalize-fneg.ll +++ test/CodeGen/RISCV/legalize-fneg.ll @@ -11,6 +11,7 @@ ; RV32-NEXT: lui a2, 524288 ; RV32-NEXT: xor a1, a1, a2 ; RV32-NEXT: sw a1, 0(a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: test1: @@ -20,6 +21,7 @@ ; RV64-NEXT: lw a1, 0(a1) ; RV64-NEXT: xor a1, a1, a2 ; RV64-NEXT: sw a1, 0(a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret entry: %0 = load float, float* %b @@ -37,6 +39,7 @@ ; RV32-NEXT: lui a1, 524288 ; RV32-NEXT: xor a1, a2, a1 ; RV32-NEXT: sw a1, 4(a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: test2: @@ -46,6 +49,7 @@ ; RV64-NEXT: ld a1, 0(a1) ; RV64-NEXT: xor a1, a1, a2 ; RV64-NEXT: sd a1, 0(a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret entry: %0 = load double, double* %b @@ 
-67,6 +71,7 @@ ; RV32-NEXT: lui a1, 524288 ; RV32-NEXT: xor a1, a2, a1 ; RV32-NEXT: sw a1, 12(a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: test3: @@ -78,6 +83,7 @@ ; RV64-NEXT: slli a1, a1, 63 ; RV64-NEXT: xor a1, a2, a1 ; RV64-NEXT: sd a1, 8(a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret entry: %0 = load fp128, fp128* %b Index: test/CodeGen/RISCV/rotl-rotr.ll =================================================================== --- test/CodeGen/RISCV/rotl-rotr.ll +++ test/CodeGen/RISCV/rotl-rotr.ll @@ -13,6 +13,7 @@ ; RV32I-NEXT: sll a1, a0, a1 ; RV32I-NEXT: srl a0, a0, a2 ; RV32I-NEXT: or a0, a1, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret %z = sub i32 32, %y %b = shl i32 %x, %y @@ -29,6 +30,7 @@ ; RV32I-NEXT: srl a1, a0, a1 ; RV32I-NEXT: sll a0, a0, a2 ; RV32I-NEXT: or a0, a1, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret %z = sub i32 32, %y %b = lshr i32 %x, %y Index: test/CodeGen/RISCV/rv64i-tricky-shifts.ll =================================================================== --- test/CodeGen/RISCV/rv64i-tricky-shifts.ll +++ test/CodeGen/RISCV/rv64i-tricky-shifts.ll @@ -12,6 +12,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: sll a0, a0, a1 ; RV64I-NEXT: sext.w a0, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = shl i64 %a, %b %2 = shl i64 %1, 32 @@ -25,6 +26,7 @@ ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: srl a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = and i64 %a, 4294967295 %2 = lshr i64 %1, %b @@ -36,6 +38,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: sra a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = shl i64 %a, 32 %2 = ashr i64 %1, 32 Index: test/CodeGen/RISCV/select-cc.ll =================================================================== --- test/CodeGen/RISCV/select-cc.ll +++ test/CodeGen/RISCV/select-cc.ll @@ -55,6 +55,7 @@ ; RV32I-NEXT: # %bb.19: ; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: .LBB0_20: +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret %val1 = load volatile i32, i32* %b %tst1 = icmp eq i32 %a, %val1 Index: test/CodeGen/RISCV/sext-zext-trunc.ll =================================================================== --- test/CodeGen/RISCV/sext-zext-trunc.ll +++ test/CodeGen/RISCV/sext-zext-trunc.ll @@ -9,12 +9,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sext_i1_to_i8: ; RV64I: # %bb.0: ; RV64I-NEXT: andi a0, a0, 1 ; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = sext i1 %a to i8 ret i8 %1 @@ -25,12 +27,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sext_i1_to_i16: ; RV64I: # %bb.0: ; RV64I-NEXT: andi a0, a0, 1 ; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = sext i1 %a to i16 ret i16 %1 @@ -41,12 +45,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sext_i1_to_i32: ; RV64I: # %bb.0: ; RV64I-NEXT: andi a0, a0, 1 ; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = sext i1 %a to i32 ret i32 %1 @@ -58,12 +64,14 @@ ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: neg a0, a0 ; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sext_i1_to_i64: ; RV64I: # %bb.0: ; 
RV64I-NEXT: andi a0, a0, 1 ; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = sext i1 %a to i64 ret i64 %1 @@ -74,12 +82,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sext_i8_to_i16: ; RV64I: # %bb.0: ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = sext i8 %a to i16 ret i16 %1 @@ -90,12 +100,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sext_i8_to_i32: ; RV64I: # %bb.0: ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = sext i8 %a to i32 ret i32 %1 @@ -107,12 +119,14 @@ ; RV32I-NEXT: slli a1, a0, 24 ; RV32I-NEXT: srai a0, a1, 24 ; RV32I-NEXT: srai a1, a1, 31 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sext_i8_to_i64: ; RV64I: # %bb.0: ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = sext i8 %a to i64 ret i64 %1 @@ -123,12 +137,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srai a0, a0, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sext_i16_to_i32: ; RV64I: # %bb.0: ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = sext i16 %a to i32 ret i32 %1 @@ -140,12 +156,14 @@ ; RV32I-NEXT: slli a1, a0, 16 ; RV32I-NEXT: srai a0, a1, 16 ; RV32I-NEXT: srai a1, a1, 31 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sext_i16_to_i64: ; RV64I: # %bb.0: ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = sext i16 %a to i64 ret i64 %1 @@ -155,11 +173,13 @@ ; RV32I-LABEL: sext_i32_to_i64: ; RV32I: # %bb.0: ; RV32I-NEXT: srai a1, a0, 31 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sext_i32_to_i64: ; RV64I: # %bb.0: ; RV64I-NEXT: sext.w a0, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = sext i32 %a to i64 ret i64 %1 @@ -169,11 +189,13 @@ ; RV32I-LABEL: zext_i1_to_i8: ; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zext_i1_to_i8: ; RV64I: # %bb.0: ; RV64I-NEXT: andi a0, a0, 1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = zext i1 %a to i8 ret i8 %1 @@ -183,11 +205,13 @@ ; RV32I-LABEL: zext_i1_to_i16: ; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zext_i1_to_i16: ; RV64I: # %bb.0: ; RV64I-NEXT: andi a0, a0, 1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = zext i1 %a to i16 ret i16 %1 @@ -197,11 +221,13 @@ ; RV32I-LABEL: zext_i1_to_i32: ; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zext_i1_to_i32: ; RV64I: # %bb.0: ; RV64I-NEXT: andi a0, a0, 1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = zext i1 %a to i32 ret i32 %1 @@ -212,11 +238,13 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zext_i1_to_i64: ; RV64I: # %bb.0: ; RV64I-NEXT: andi a0, a0, 1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = zext i1 %a to i64 ret i64 %1 
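Note on the leaf functions above: even code that never touches the stack, such as these sext/zext/trunc cases, now ends with ".cfi_def_cfa_offset 0" immediately before "ret", because the epilogue emits that directive unconditionally after the (possibly zero-sized) stack deallocation. A minimal hand-written equivalent of what the CHECK lines match is sketched below; it is illustrative only, not output of this patch, and adds the .cfi_startproc/.cfi_endproc brackets that the CHECK lines do not capture:

  .text
  .globl zext_i1_to_i8
zext_i1_to_i8:                  # hand-written sketch of the test above
  .cfi_startproc
  andi  a0, a0, 1               # zero-extend i1 to i8: keep only bit 0 of a0
  .cfi_def_cfa_offset 0         # CFA is still sp + 0; redundant for a leaf, but valid
  ret
  .cfi_endproc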
@@ -226,11 +254,13 @@ ; RV32I-LABEL: zext_i8_to_i16: ; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 255 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zext_i8_to_i16: ; RV64I: # %bb.0: ; RV64I-NEXT: andi a0, a0, 255 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = zext i8 %a to i16 ret i16 %1 @@ -240,11 +270,13 @@ ; RV32I-LABEL: zext_i8_to_i32: ; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 255 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zext_i8_to_i32: ; RV64I: # %bb.0: ; RV64I-NEXT: andi a0, a0, 255 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = zext i8 %a to i32 ret i32 %1 @@ -255,11 +287,13 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zext_i8_to_i64: ; RV64I: # %bb.0: ; RV64I-NEXT: andi a0, a0, 255 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = zext i8 %a to i64 ret i64 %1 @@ -271,6 +305,7 @@ ; RV32I-NEXT: lui a1, 16 ; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: and a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zext_i16_to_i32: @@ -278,6 +313,7 @@ ; RV64I-NEXT: lui a1, 16 ; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: and a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = zext i16 %a to i32 ret i32 %1 @@ -290,6 +326,7 @@ ; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: and a0, a0, a1 ; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zext_i16_to_i64: @@ -297,6 +334,7 @@ ; RV64I-NEXT: lui a1, 16 ; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: and a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = zext i16 %a to i64 ret i64 %1 @@ -306,12 +344,14 @@ ; RV32I-LABEL: zext_i32_to_i64: ; RV32I: # %bb.0: ; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: zext_i32_to_i64: ; RV64I: # %bb.0: ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = zext i32 %a to i64 ret i64 %1 @@ -320,10 +360,12 @@ define i1 @trunc_i8_to_i1(i8 %a) { ; RV32I-LABEL: trunc_i8_to_i1: ; RV32I: # %bb.0: +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: trunc_i8_to_i1: ; RV64I: # %bb.0: +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = trunc i8 %a to i1 ret i1 %1 @@ -332,10 +374,12 @@ define i1 @trunc_i16_to_i1(i16 %a) { ; RV32I-LABEL: trunc_i16_to_i1: ; RV32I: # %bb.0: +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: trunc_i16_to_i1: ; RV64I: # %bb.0: +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = trunc i16 %a to i1 ret i1 %1 @@ -344,10 +388,12 @@ define i1 @trunc_i32_to_i1(i32 %a) { ; RV32I-LABEL: trunc_i32_to_i1: ; RV32I: # %bb.0: +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: trunc_i32_to_i1: ; RV64I: # %bb.0: +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = trunc i32 %a to i1 ret i1 %1 @@ -356,10 +402,12 @@ define i1 @trunc_i64_to_i1(i64 %a) { ; RV32I-LABEL: trunc_i64_to_i1: ; RV32I: # %bb.0: +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: trunc_i64_to_i1: ; RV64I: # %bb.0: +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = trunc i64 %a to i1 ret i1 %1 @@ -368,10 +416,12 @@ define i8 @trunc_i16_to_i8(i16 %a) { ; RV32I-LABEL: trunc_i16_to_i8: ; RV32I: # %bb.0: +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: trunc_i16_to_i8: ; RV64I: # %bb.0: +; 
 ; RV64I-NEXT: ret
 %1 = trunc i16 %a to i8
 ret i8 %1
@@ -380,10 +430,12 @@
 define i8 @trunc_i32_to_i8(i32 %a) {
 ; RV32I-LABEL: trunc_i32_to_i8:
 ; RV32I: # %bb.0:
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: trunc_i32_to_i8:
 ; RV64I: # %bb.0:
+; RV64I-NEXT: .cfi_def_cfa_offset 0
 ; RV64I-NEXT: ret
 %1 = trunc i32 %a to i8
 ret i8 %1
@@ -392,10 +444,12 @@
 define i8 @trunc_i64_to_i8(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i8:
 ; RV32I: # %bb.0:
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: trunc_i64_to_i8:
 ; RV64I: # %bb.0:
+; RV64I-NEXT: .cfi_def_cfa_offset 0
 ; RV64I-NEXT: ret
 %1 = trunc i64 %a to i8
 ret i8 %1
@@ -404,10 +458,12 @@
 define i16 @trunc_i32_to_i16(i32 %a) {
 ; RV32I-LABEL: trunc_i32_to_i16:
 ; RV32I: # %bb.0:
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: trunc_i32_to_i16:
 ; RV64I: # %bb.0:
+; RV64I-NEXT: .cfi_def_cfa_offset 0
 ; RV64I-NEXT: ret
 %1 = trunc i32 %a to i16
 ret i16 %1
@@ -416,10 +472,12 @@
 define i16 @trunc_i64_to_i16(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i16:
 ; RV32I: # %bb.0:
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: trunc_i64_to_i16:
 ; RV64I: # %bb.0:
+; RV64I-NEXT: .cfi_def_cfa_offset 0
 ; RV64I-NEXT: ret
 %1 = trunc i64 %a to i16
 ret i16 %1
@@ -428,10 +486,12 @@
 define i32 @trunc_i64_to_i32(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i32:
 ; RV32I: # %bb.0:
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: trunc_i64_to_i32:
 ; RV64I: # %bb.0:
+; RV64I-NEXT: .cfi_def_cfa_offset 0
 ; RV64I-NEXT: ret
 %1 = trunc i64 %a to i32
 ret i32 %1
Index: test/CodeGen/RISCV/tail-calls.ll
===================================================================
--- test/CodeGen/RISCV/tail-calls.ll
+++ test/CodeGen/RISCV/tail-calls.ll
@@ -35,10 +35,12 @@
 ; CHECK: lui a0, %hi(callee_indirect2)
 ; CHECK-NEXT: addi a5, a0, %lo(callee_indirect2)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
 ; CHECK-NEXT: jr a5

 ; CHECK: lui a0, %hi(callee_indirect1)
 ; CHECK-NEXT: addi a5, a0, %lo(callee_indirect1)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
 ; CHECK-NEXT: jr a5

 entry:
 %tobool = icmp eq i32 %a, 0
Index: test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll
===================================================================
--- test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll
+++ test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll
@@ -5,6 +5,7 @@
 ; RISCV32-LABEL: muloti_test:
 ; RISCV32: # %bb.0: # %start
 ; RISCV32-NEXT: addi sp, sp, -80
+; RISCV32-NEXT: .cfi_def_cfa_offset 80
 ; RISCV32-NEXT: sw ra, 76(sp)
 ; RISCV32-NEXT: sw s0, 72(sp)
 ; RISCV32-NEXT: sw s1, 68(sp)
@@ -13,6 +14,14 @@
 ; RISCV32-NEXT: sw s4, 56(sp)
 ; RISCV32-NEXT: sw s5, 52(sp)
 ; RISCV32-NEXT: sw s6, 48(sp)
+; RISCV32-NEXT: .cfi_offset ra, -4
+; RISCV32-NEXT: .cfi_offset s0, -8
+; RISCV32-NEXT: .cfi_offset s1, -12
+; RISCV32-NEXT: .cfi_offset s2, -16
+; RISCV32-NEXT: .cfi_offset s3, -20
+; RISCV32-NEXT: .cfi_offset s4, -24
+; RISCV32-NEXT: .cfi_offset s5, -28
+; RISCV32-NEXT: .cfi_offset s6, -32
 ; RISCV32-NEXT: mv s1, a2
 ; RISCV32-NEXT: mv s0, a1
 ; RISCV32-NEXT: mv s2, a0
@@ -109,6 +118,7 @@
 ; RISCV32-NEXT: lw s0, 72(sp)
 ; RISCV32-NEXT: lw ra, 76(sp)
 ; RISCV32-NEXT: addi sp, sp, 80
+; RISCV32-NEXT: .cfi_def_cfa_offset 0
 ; RISCV32-NEXT: ret
 start:
 %0 = tail call { i128, i1 } @llvm.umul.with.overflow.i128(i128 %l, i128 %r) #2
Index: test/CodeGen/RISCV/zext-with-load-is-free.ll
===================================================================
--- test/CodeGen/RISCV/zext-with-load-is-free.ll
+++ test/CodeGen/RISCV/zext-with-load-is-free.ll
@@ -20,9 +22,11 @@
 ; RV32I-NEXT: bne a0, a1, .LBB0_3
 ; RV32I-NEXT: # %bb.2: # %if.end
 ; RV32I-NEXT: mv a0, zero
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ; RV32I-NEXT: .LBB0_3: # %if.then
 ; RV32I-NEXT: addi a0, zero, 1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 entry:
 %0 = load i8, i8* getelementptr inbounds ([5 x i8], [5 x i8]* @bytes, i32 0, i32 0), align 1
@@ -56,9 +58,11 @@
 ; RV32I-NEXT: bne a0, a1, .LBB1_3
 ; RV32I-NEXT: # %bb.2: # %if.end
 ; RV32I-NEXT: mv a0, zero
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ; RV32I-NEXT: .LBB1_3: # %if.then
 ; RV32I-NEXT: addi a0, zero, 1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 entry:
 %0 = load i16, i16* getelementptr inbounds ([5 x i16], [5 x i16]* @shorts, i32 0, i32 0), align 2