diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -541,6 +541,7 @@
   const RISCVRegisterInfo *RI = STI.getRegisterInfo();
   MachineFrameInfo &MFI = MF.getFrameInfo();
   auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+  const RISCVInstrInfo *TII = STI.getInstrInfo();
   Register FPReg = getFPReg(STI);
   Register SPReg = getSPReg(STI);
@@ -606,6 +607,50 @@
     adjustReg(MBB, LastFrameDestroy, DL, SPReg, SPReg, SecondSPAdjustAmount,
               MachineInstr::FrameDestroy);
+
+    // Emit ".cfi_def_cfa_offset FirstSPAdjustAmount" if using an sp-based CFA
+    if (!hasFP(MF)) {
+      unsigned CFIIndex = MF.addFrameInst(
+          MCCFIInstruction::cfiDefCfaOffset(nullptr, FirstSPAdjustAmount));
+      BuildMI(MBB, LastFrameDestroy, DL,
+              TII->get(TargetOpcode::CFI_INSTRUCTION))
+          .addCFIIndex(CFIIndex);
+    }
+  }
+
+  if (hasFP(MF)) {
+    // Find the instruction that restores FP from the stack.
+    for (auto &I = LastFrameDestroy; I != MBBI; ++I) {
+      if (I->mayLoad() && I->getOperand(0).isReg()) {
+        Register DestReg = I->getOperand(0).getReg();
+        if (DestReg == FPReg) {
+          // Once the frame pointer has been restored, the CFA has to be
+          // adjusted back to the correct sp-based offset.
+          // Emit ".cfi_def_cfa $sp, CFAOffset"
+          uint64_t CFAOffset =
+              FirstSPAdjustAmount
+                  ? FirstSPAdjustAmount + RVFI->getVarArgsSaveSize()
+                  : FPOffset;
+          unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
+              nullptr, RI->getDwarfRegNum(SPReg, true), CFAOffset));
+          BuildMI(MBB, std::next(I), DL,
+                  TII->get(TargetOpcode::CFI_INSTRUCTION))
+              .addCFIIndex(CFIIndex);
+          break;
+        }
+      }
+    }
+  }
+
+  // Add CFI directives for the callee-saved registers: iterate over the list
+  // of callee-saved registers and emit a .cfi_restore directive for each one.
+  for (const auto &Entry : CSI) {
+    Register Reg = Entry.getReg();
+    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createRestore(
+        nullptr, RI->getDwarfRegNum(Reg, true)));
+    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+        .addCFIIndex(CFIIndex);
   }
 
   if (FirstSPAdjustAmount)
@@ -616,6 +661,13 @@
   // Emit epilogue for shadow call stack.
   emitSCSEpilogue(MF, MBB, MBBI, DL);
+
+  // After restoring $sp, adjust the CFA back to (sp + 0).
+  // Emit ".cfi_def_cfa_offset 0"
+  unsigned CFIIndex =
+      MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, 0));
+  BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+      .addCFIIndex(CFIIndex);
 }
 
 StackOffset
diff --git a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
--- a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
+++ b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
@@ -13,6 +13,7 @@
 ; RV32IMB-NEXT: addi a1, zero, 29
 ; RV32IMB-NEXT: mul a0, a0, a1
 ; RV32IMB-NEXT: addi a0, a0, 1073
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_accept_a1:
@@ -20,6 +21,7 @@
 ; RV64IMB-NEXT: addi a1, zero, 29
 ; RV64IMB-NEXT: mulw a0, a0, a1
 ; RV64IMB-NEXT: addiw a0, a0, 1073
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = add i32 %x, 37
 %tmp1 = mul i32 %tmp0, 29
@@ -32,6 +34,7 @@
 ; RV32IMB-NEXT: addi a1, zero, 29
 ; RV32IMB-NEXT: mul a0, a0, a1
 ; RV32IMB-NEXT: addi a0, a0, 1073
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_accept_a2:
@@ -39,6 +42,7 @@
 ; RV64IMB-NEXT: addi a1, zero, 29
 ; RV64IMB-NEXT: mulw a0, a0, a1
 ; RV64IMB-NEXT: addiw a0, a0, 1073
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = add i32 %x, 37
 %tmp1 = mul i32 %tmp0, 29
@@ -56,6 +60,7 @@
 ; RV32IMB-NEXT: addi a0, a2, 1073
 ; RV32IMB-NEXT: sltu a2, a0, a2
 ; RV32IMB-NEXT: add a1, a1, a2
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_accept_a3:
@@ -63,6 +68,7 @@
 ; RV64IMB-NEXT: addi a1, zero, 29
 ; RV64IMB-NEXT: mul a0, a0, a1
 ; RV64IMB-NEXT: addi a0, a0, 1073
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = add i64 %x, 37
 %tmp1 = mul i64 %tmp0, 29
@@ -77,6 +83,7 @@
 ; RV32IMB-NEXT: lui a1, 50
 ; RV32IMB-NEXT: addi a1, a1, 1119
 ; RV32IMB-NEXT: add a0, a0, a1
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_accept_b1:
@@ -86,6 +93,7 @@
 ; RV64IMB-NEXT: lui a1, 50
 ; RV64IMB-NEXT: addiw a1, a1, 1119
 ; RV64IMB-NEXT: addw a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = add i32 %x, 8953
 %tmp1 = mul i32 %tmp0, 23
@@ -100,6 +108,7 @@
 ; RV32IMB-NEXT: lui a1, 50
 ; RV32IMB-NEXT: addi a1, a1, 1119
 ; RV32IMB-NEXT: add a0, a0, a1
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_accept_b2:
@@ -109,6 +118,7 @@
 ; RV64IMB-NEXT: lui a1, 50
 ; RV64IMB-NEXT: addiw a1, a1, 1119
 ; RV64IMB-NEXT: addw a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = add i32 %x, 8953
 %tmp1 = mul i32 %tmp0, 23
@@ -128,6 +138,7 @@
 ; RV32IMB-NEXT: add a0, a2, a0
 ; RV32IMB-NEXT: sltu a2, a0, a2
 ; RV32IMB-NEXT: add a1, a1, a2
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_accept_b3:
@@ -137,6 +148,7 @@
 ; RV64IMB-NEXT: lui a1, 50
 ; RV64IMB-NEXT: addiw a1, a1, 1119
 ; RV64IMB-NEXT: add a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = add i64 %x, 8953
 %tmp1 = mul i64 %tmp0, 23
@@ -149,6 +161,7 @@
 ; RV32IMB-NEXT: addi a0, a0, 1971
 ; RV32IMB-NEXT: addi a1, zero, 29
 ; RV32IMB-NEXT: mul a0, a0, a1
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_a1:
@@ -156,6 +169,7 @@
 ; RV64IMB-NEXT: addiw a0, a0, 1971
 ; RV64IMB-NEXT: addi a1, zero, 29
 ; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = add i32 %x, 1971
 %tmp1 = mul i32 %tmp0, 29
@@ -168,6 +182,7 @@
 ; RV32IMB-NEXT: addi a0, a0, 1971
 ; RV32IMB-NEXT: addi a1, zero, 29
 ; RV32IMB-NEXT: mul a0, a0, a1
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_a2:
@@ -175,6 +190,7 @@
 ; RV64IMB-NEXT: addiw a0, a0, 1971
 ; RV64IMB-NEXT: addi a1, zero, 29
 ; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = add i32 %x, 1971
 %tmp1 = mul i32 %tmp0, 29
@@ -194,6 +210,7 @@
 ; RV32IMB-NEXT: add a0, a2, a0
 ; RV32IMB-NEXT: sltu a2, a0, a2
 ; RV32IMB-NEXT: add a1, a1, a2
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_a3:
@@ -201,6 +218,7 @@
 ; RV64IMB-NEXT: addi a0, a0, 1971
 ; RV64IMB-NEXT: addi a1, zero, 29
 ; RV64IMB-NEXT: mul a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = add i64 %x, 1971
 %tmp1 = mul i64 %tmp0, 29
@@ -213,6 +231,7 @@
 ; RV32IMB-NEXT: addi a0, a0, 1000
 ; RV32IMB-NEXT: sh3add a1, a0, a0
 ; RV32IMB-NEXT: sh3add a0, a1, a0
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_c1:
@@ -221,6 +240,7 @@
 ; RV64IMB-NEXT: sh3add a1, a0, a0
 ; RV64IMB-NEXT: sh3add a0, a1, a0
 ; RV64IMB-NEXT: sext.w a0, a0
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = add i32 %x, 1000
 %tmp1 = mul i32 %tmp0, 73
@@ -233,6 +253,7 @@
 ; RV32IMB-NEXT: addi a0, a0, 1000
 ; RV32IMB-NEXT: sh3add a1, a0, a0
 ; RV32IMB-NEXT: sh3add a0, a1, a0
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_c2:
@@ -241,6 +262,7 @@
 ; RV64IMB-NEXT: sh3add a1, a0, a0
 ; RV64IMB-NEXT: sh3add a0, a1, a0
 ; RV64IMB-NEXT: sext.w a0, a0
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = add i32 %x, 1000
 %tmp1 = mul i32 %tmp0, 73
@@ -260,6 +282,7 @@
 ; RV32IMB-NEXT: add a0, a2, a0
 ; RV32IMB-NEXT: sltu a2, a0, a2
 ; RV32IMB-NEXT: add a1, a1, a2
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_c3:
@@ -267,6 +290,7 @@
 ; RV64IMB-NEXT: addi a0, a0, 1000
 ; RV64IMB-NEXT: sh3add a1, a0, a0
 ; RV64IMB-NEXT: sh3add a0, a1, a0
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = add i64 %x, 1000
 %tmp1 = mul i64 %tmp0, 73
@@ -279,6 +303,7 @@
 ; RV32IMB-NEXT: addi a0, a0, 1000
 ; RV32IMB-NEXT: sh1add a0, a0, a0
 ; RV32IMB-NEXT: slli a0, a0, 6
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_d1:
@@ -286,6 +311,7 @@
 ; RV64IMB-NEXT: addi a0, a0, 1000
 ; RV64IMB-NEXT: sh1add a0, a0, a0
 ; RV64IMB-NEXT: slliw a0, a0, 6
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = add i32 %x, 1000
 %tmp1 = mul i32 %tmp0, 192
@@ -298,6 +324,7 @@
 ; RV32IMB-NEXT: addi a0, a0, 1000
 ; RV32IMB-NEXT: sh1add a0, a0, a0
 ; RV32IMB-NEXT: slli a0, a0, 6
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_d2:
@@ -305,6 +332,7 @@
 ; RV64IMB-NEXT: addi a0, a0, 1000
 ; RV64IMB-NEXT: sh1add a0, a0, a0
 ; RV64IMB-NEXT: slliw a0, a0, 6
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = add i32 %x, 1000
 %tmp1 = mul i32 %tmp0, 192
@@ -326,6 +354,7 @@
 ; RV32IMB-NEXT: add a0, a2, a0
 ; RV32IMB-NEXT: sltu a2, a0, a2
 ; RV32IMB-NEXT: add a1, a1, a2
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_d3:
@@ -333,6 +362,7 @@
 ; RV64IMB-NEXT: addi a0, a0, 1000
 ; RV64IMB-NEXT: sh1add a0, a0, a0
 ; RV64IMB-NEXT: slli a0, a0, 6
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = add i64 %x, 1000
 %tmp1 = mul i64 %tmp0, 192
@@ -345,6 +375,7 @@
 ; RV32IMB-NEXT: addi a0, a0, 1971
 ; RV32IMB-NEXT: addi a1, zero, 29
 ; RV32IMB-NEXT: mul a0, a0, a1
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_e1:
@@ -352,6 +383,7 @@
 ; RV64IMB-NEXT: addiw a0, a0, 1971
 ; RV64IMB-NEXT: addi a1, zero, 29
 ; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i32 %x, 29
 %tmp1 = add i32 %tmp0, 57159
@@ -364,6 +396,7 @@
 ; RV32IMB-NEXT: addi a0, a0, 1971
 ; RV32IMB-NEXT: addi a1, zero, 29
 ; RV32IMB-NEXT: mul a0, a0, a1
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_e2:
@@ -371,6 +404,7 @@
 ; RV64IMB-NEXT: addiw a0, a0, 1971
 ; RV64IMB-NEXT: addi a1, zero, 29
 ; RV64IMB-NEXT: mulw a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i32 %x, 29
 %tmp1 = add i32 %tmp0, 57159
@@ -390,6 +424,7 @@
 ; RV32IMB-NEXT: add a0, a2, a0
 ; RV32IMB-NEXT: sltu a2, a0, a2
 ; RV32IMB-NEXT: add a1, a1, a2
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_e3:
@@ -397,6 +432,7 @@
 ; RV64IMB-NEXT: addi a0, a0, 1971
 ; RV64IMB-NEXT: addi a1, zero, 29
 ; RV64IMB-NEXT: mul a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i64 %x, 29
 %tmp1 = add i64 %tmp0, 57159
@@ -410,6 +446,7 @@
 ; RV32IMB-NEXT: addi a1, zero, 29
 ; RV32IMB-NEXT: mul a0, a0, a1
 ; RV32IMB-NEXT: addi a0, a0, 11
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_f1:
@@ -418,6 +455,7 @@
 ; RV64IMB-NEXT: addi a1, zero, 29
 ; RV64IMB-NEXT: mulw a0, a0, a1
 ; RV64IMB-NEXT: addiw a0, a0, 11
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i32 %x, 29
 %tmp1 = add i32 %tmp0, 57199
@@ -431,6 +469,7 @@
 ; RV32IMB-NEXT: addi a1, zero, 29
 ; RV32IMB-NEXT: mul a0, a0, a1
 ; RV32IMB-NEXT: addi a0, a0, 11
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_f2:
@@ -439,6 +478,7 @@
 ; RV64IMB-NEXT: addi a1, zero, 29
 ; RV64IMB-NEXT: mulw a0, a0, a1
 ; RV64IMB-NEXT: addiw a0, a0, 11
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i32 %x, 29
 %tmp1 = add i32 %tmp0, 57199
@@ -458,6 +498,7 @@
 ; RV32IMB-NEXT: add a0, a2, a0
 ; RV32IMB-NEXT: sltu a2, a0, a2
 ; RV32IMB-NEXT: add a1, a1, a2
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_f3:
@@ -466,6 +507,7 @@
 ; RV64IMB-NEXT: addi a1, zero, 29
 ; RV64IMB-NEXT: mul a0, a0, a1
 ; RV64IMB-NEXT: addi a0, a0, 11
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i64 %x, 29
 %tmp1 = add i64 %tmp0, 57199
@@ -479,6 +521,7 @@
 ; RV32IMB-NEXT: sh3add a1, a0, a0
 ; RV32IMB-NEXT: sh3add a0, a1, a0
 ; RV32IMB-NEXT: addi a0, a0, 10
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_g1:
@@ -487,6 +530,7 @@
 ; RV64IMB-NEXT: sh3add a1, a0, a0
 ; RV64IMB-NEXT: sh3add a0, a1, a0
 ; RV64IMB-NEXT: addiw a0, a0, 10
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i32 %x, 73
 %tmp1 = add i32 %tmp0, 7310
@@ -500,6 +544,7 @@
 ; RV32IMB-NEXT: sh3add a1, a0, a0
 ; RV32IMB-NEXT: sh3add a0, a1, a0
 ; RV32IMB-NEXT: addi a0, a0, 10
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_g2:
@@ -508,6 +553,7 @@
 ; RV64IMB-NEXT: sh3add a1, a0, a0
 ; RV64IMB-NEXT: sh3add a0, a1, a0
 ; RV64IMB-NEXT: addiw a0, a0, 10
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i32 %x, 73
 %tmp1 = add i32 %tmp0, 7310
@@ -527,6 +573,7 @@
 ; RV32IMB-NEXT: add a0, a2, a0
 ; RV32IMB-NEXT: sltu a2, a0, a2
 ; RV32IMB-NEXT: add a1, a1, a2
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_g3:
@@ -535,6 +582,7 @@
 ; RV64IMB-NEXT: sh3add a1, a0, a0
 ; RV64IMB-NEXT: sh3add a0, a1, a0
 ; RV64IMB-NEXT: addi a0, a0, 10
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i64 %x, 73
 %tmp1 = add i64 %tmp0, 7310
@@ -555,6 +603,7 @@
 ; RV32IMB-NEXT: addi a0, a0, 1024
 ; RV32IMB-NEXT: sltu a2, a0, a2
 ; RV32IMB-NEXT: add a1, a1, a2
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_infinite_loop:
@@ -563,6 +612,7 @@
 ; RV64IMB-NEXT: lui a1, 1
 ; RV64IMB-NEXT: addiw a1, a1, -2048
 ; RV64IMB-NEXT: sh3add a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i64 %x, 24
 %tmp1 = add i64 %tmp0, 2048
@@ -578,6 +628,7 @@
 ; RV32IMB-NEXT: lui a1, 2
 ; RV32IMB-NEXT: addi a1, a1, 798
 ; RV32IMB-NEXT: add a0, a0, a1
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: mul3000_add8990_a:
@@ -588,6 +639,7 @@
 ; RV64IMB-NEXT: lui a1, 2
 ; RV64IMB-NEXT: addiw a1, a1, 798
 ; RV64IMB-NEXT: addw a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i32 %x, 3000
 %tmp1 = add i32 %tmp0, 8990
@@ -603,6 +655,7 @@
 ; RV32IMB-NEXT: lui a1, 2
 ; RV32IMB-NEXT: addi a1, a1, 798
 ; RV32IMB-NEXT: add a0, a0, a1
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: mul3000_add8990_b:
@@ -613,6 +666,7 @@
 ; RV64IMB-NEXT: lui a1, 2
 ; RV64IMB-NEXT: addiw a1, a1, 798
 ; RV64IMB-NEXT: addw a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i32 %x, 3000
 %tmp1 = add i32 %tmp0, 8990
@@ -633,6 +687,7 @@
 ; RV32IMB-NEXT: add a0, a2, a0
 ; RV32IMB-NEXT: sltu a2, a0, a2
 ; RV32IMB-NEXT: add a1, a1, a2
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: mul3000_add8990_c:
@@ -643,6 +698,7 @@
 ; RV64IMB-NEXT: lui a1, 2
 ; RV64IMB-NEXT: addiw a1, a1, 798
 ; RV64IMB-NEXT: add a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i64 %x, 3000
 %tmp1 = add i64 %tmp0, 8990
@@ -658,6 +714,7 @@
 ; RV32IMB-NEXT: lui a1, 1048574
 ; RV32IMB-NEXT: addi a1, a1, -798
 ; RV32IMB-NEXT: add a0, a0, a1
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: mul3000_sub8990_a:
@@ -668,6 +725,7 @@
 ; RV64IMB-NEXT: lui a1, 1048574
 ; RV64IMB-NEXT: addiw a1, a1, -798
 ; RV64IMB-NEXT: addw a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i32 %x, 3000
 %tmp1 = add i32 %tmp0, -8990
@@ -683,6 +741,7 @@
 ; RV32IMB-NEXT: lui a1, 1048574
 ; RV32IMB-NEXT: addi a1, a1, -798
 ; RV32IMB-NEXT: add a0, a0, a1
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: mul3000_sub8990_b:
@@ -693,6 +752,7 @@
 ; RV64IMB-NEXT: lui a1, 1048574
 ; RV64IMB-NEXT: addiw a1, a1, -798
 ; RV64IMB-NEXT: addw a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i32 %x, 3000
 %tmp1 = add i32 %tmp0, -8990
@@ -714,6 +774,7 @@
 ; RV32IMB-NEXT: sltu a2, a0, a2
 ; RV32IMB-NEXT: add a1, a1, a2
 ; RV32IMB-NEXT: addi a1, a1, -1
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: mul3000_sub8990_c:
@@ -724,6 +785,7 @@
 ; RV64IMB-NEXT: lui a1, 1048574
 ; RV64IMB-NEXT: addiw a1, a1, -798
 ; RV64IMB-NEXT: add a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i64 %x, 3000
 %tmp1 = add i64 %tmp0, -8990
@@ -739,6 +801,7 @@
 ; RV32IMB-NEXT: lui a1, 2
 ; RV32IMB-NEXT: addi a1, a1, 798
 ; RV32IMB-NEXT: add a0, a0, a1
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: mulneg3000_add8990_a:
@@ -749,6 +812,7 @@
 ; RV64IMB-NEXT: lui a1, 2
 ; RV64IMB-NEXT: addiw a1, a1, 798
 ; RV64IMB-NEXT: addw a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i32 %x, -3000
 %tmp1 = add i32 %tmp0, 8990
@@ -764,6 +828,7 @@
 ; RV32IMB-NEXT: lui a1, 2
 ; RV32IMB-NEXT: addi a1, a1, 798
 ; RV32IMB-NEXT: add a0, a0, a1
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: mulneg3000_add8990_b:
@@ -774,6 +839,7 @@
 ; RV64IMB-NEXT: lui a1, 2
 ; RV64IMB-NEXT: addiw a1, a1, 798
 ; RV64IMB-NEXT: addw a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i32 %x, -3000
 %tmp1 = add i32 %tmp0, 8990
@@ -795,6 +861,7 @@
 ; RV32IMB-NEXT: add a0, a2, a0
 ; RV32IMB-NEXT: sltu a2, a0, a2
 ; RV32IMB-NEXT: add a1, a1, a2
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: mulneg3000_add8990_c:
@@ -805,6 +872,7 @@
 ; RV64IMB-NEXT: lui a1, 2
 ; RV64IMB-NEXT: addiw a1, a1, 798
 ; RV64IMB-NEXT: add a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i64 %x, -3000
 %tmp1 = add i64 %tmp0, 8990
@@ -820,6 +888,7 @@
 ; RV32IMB-NEXT: lui a1, 1048574
 ; RV32IMB-NEXT: addi a1, a1, -798
 ; RV32IMB-NEXT: add a0, a0, a1
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: mulneg3000_sub8990_a:
@@ -830,6 +899,7 @@
 ; RV64IMB-NEXT: lui a1, 1048574
 ; RV64IMB-NEXT: addiw a1, a1, -798
 ; RV64IMB-NEXT: addw a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i32 %x, -3000
 %tmp1 = add i32 %tmp0, -8990
@@ -845,6 +915,7 @@
 ; RV32IMB-NEXT: lui a1, 1048574
 ; RV32IMB-NEXT: addi a1, a1, -798
 ; RV32IMB-NEXT: add a0, a0, a1
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: mulneg3000_sub8990_b:
@@ -855,6 +926,7 @@
 ; RV64IMB-NEXT: lui a1, 1048574
 ; RV64IMB-NEXT: addiw a1, a1, -798
 ; RV64IMB-NEXT: addw a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i32 %x, -3000
 %tmp1 = add i32 %tmp0, -8990
@@ -877,6 +949,7 @@
 ; RV32IMB-NEXT: sltu a2, a0, a2
 ; RV32IMB-NEXT: add a1, a1, a2
 ; RV32IMB-NEXT: addi a1, a1, -1
+; RV32IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV32IMB-NEXT: ret
 ;
 ; RV64IMB-LABEL: mulneg3000_sub8990_c:
@@ -887,6 +960,7 @@
 ; RV64IMB-NEXT: lui a1, 1048574
 ; RV64IMB-NEXT: addiw a1, a1, -798
 ; RV64IMB-NEXT: add a0, a0, a1
+; RV64IMB-NEXT: .cfi_def_cfa_offset 0
 ; RV64IMB-NEXT: ret
 %tmp0 = mul i64 %x, -3000
 %tmp1 = add i64 %tmp0, -8990
diff --git a/llvm/test/CodeGen/RISCV/addrspacecast.ll b/llvm/test/CodeGen/RISCV/addrspacecast.ll
--- a/llvm/test/CodeGen/RISCV/addrspacecast.ll
+++ b/llvm/test/CodeGen/RISCV/addrspacecast.ll
@@ -8,11 +8,13 @@
 ; RV32I-LABEL: cast0:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: sw zero, 0(a0)
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: cast0:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: sw zero, 0(a0)
+; RV64I-NEXT: .cfi_def_cfa_offset 0
 ; RV64I-NEXT: ret
 %ptr0 = addrspacecast i32 addrspace(1)* %ptr to i32 addrspace(0)*
 store i32 0, i32* %ptr0
@@ -28,7 +30,9 @@
 ; RV32I-NEXT: .cfi_offset ra, -4
 ; RV32I-NEXT: call foo@plt
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: .cfi_restore ra
 ; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: cast1:
@@ -39,7 +43,9 @@
 ; RV64I-NEXT: .cfi_offset ra, -8
 ; RV64I-NEXT: call foo@plt
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: .cfi_restore ra
 ; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: .cfi_def_cfa_offset 0
 ; RV64I-NEXT: ret
 %castptr = addrspacecast i32* %ptr to i32 addrspace(10)*
 call void @foo(i32 addrspace(10)* %castptr)
diff --git a/llvm/test/CodeGen/RISCV/aext-to-sext.ll b/llvm/test/CodeGen/RISCV/aext-to-sext.ll
--- a/llvm/test/CodeGen/RISCV/aext-to-sext.ll
+++ b/llvm/test/CodeGen/RISCV/aext-to-sext.ll
@@ -62,6 +62,7 @@
 ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
 ; RV64I-NEXT: beq a0, a1, .LBB1_1
 ; RV64I-NEXT: # %bb.2: # %bar
+; RV64I-NEXT: .cfi_def_cfa_offset 0
 ; RV64I-NEXT: ret
 br label %bb
diff --git a/llvm/test/CodeGen/RISCV/alu32.ll b/llvm/test/CodeGen/RISCV/alu32.ll
--- a/llvm/test/CodeGen/RISCV/alu32.ll
+++ b/llvm/test/CodeGen/RISCV/alu32.ll
@@ -133,12 +133,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: srli a0, a0, 3
 ; RV32I-NEXT: ori a0, a0, 1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: srli_demandedbits:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: srliw a0, a0, 3
 ; RV64I-NEXT: ori a0, a0, 1
+; RV64I-NEXT: .cfi_def_cfa_offset 0
 ; RV64I-NEXT: ret
 %2 = lshr i32 %0, 3
 %3 = or i32 %2, 1
diff --git a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
--- a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
@@ -1286,6 +1286,7 @@
 ; RV32I-NEXT: srli a1, a0, 1
 ; RV32I-NEXT: xor a0, a0, a1
 ; RV32I-NEXT: andi a0, a0, 1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: test_parity_i32:
@@ -1303,6 +1304,7 @@
 ; RV64I-NEXT: srli a1, a0, 1
 ; RV64I-NEXT: xor a0, a0, a1
 ; RV64I-NEXT: andi a0, a0, 1
+; RV64I-NEXT: .cfi_def_cfa_offset 0
 ; RV64I-NEXT: ret
 %1 = call i32 @llvm.ctpop.i32(i32 %a)
 %2 = and i32 %1, 1
@@ -1325,6 +1327,7 @@
 ; RV32I-NEXT: xor a0, a0, a1
 ; RV32I-NEXT: andi a0, a0, 1
 ; RV32I-NEXT: mv a1, zero
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: test_parity_i64:
@@ -1342,6 +1345,7 @@
 ; RV64I-NEXT: srli a1, a0, 1
 ; RV64I-NEXT: xor a0, a0, a1
 ; RV64I-NEXT: andi a0, a0, 1
+; RV64I-NEXT: .cfi_def_cfa_offset 0
 ; RV64I-NEXT: ret
 %1 = call i64 @llvm.ctpop.i64(i64 %a)
 %2 = and i64 %1, 1
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-vector-float.ll b/llvm/test/CodeGen/RISCV/calling-conv-vector-float.ll
--- a/llvm/test/CodeGen/RISCV/calling-conv-vector-float.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-vector-float.ll
@@ -15,12 +15,14 @@
 ; RV64-NEXT: fadd.s ft0, ft1, ft0
 ; RV64-NEXT: fmv.x.w a0, ft0
 ; RV64-NEXT: fmv.x.w a1, ft2
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ;
 ; RV64LP64F-LABEL: callee_v2f32:
 ; RV64LP64F: # %bb.0:
 ; RV64LP64F-NEXT: fadd.s fa0, fa0, fa2
 ; RV64LP64F-NEXT: fadd.s fa1, fa1, fa3
+; RV64LP64F-NEXT: .cfi_def_cfa_offset 0
 ; RV64LP64F-NEXT: ret
 %z = fadd <2 x float> %x, %y
 ret <2 x float> %z
@@ -45,6 +47,7 @@
 ; RV64-NEXT: fsw ft1, 8(a0)
 ; RV64-NEXT: fsw ft3, 4(a0)
 ; RV64-NEXT: fsw ft5, 0(a0)
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ;
 ; RV64LP64F-LABEL: callee_v4f32:
@@ -57,6 +60,7 @@
 ; RV64LP64F-NEXT: fsw ft2, 8(a0)
 ; RV64LP64F-NEXT: fsw ft1, 4(a0)
 ; RV64LP64F-NEXT: fsw ft0, 0(a0)
+; RV64LP64F-NEXT: .cfi_def_cfa_offset 0
 ; RV64LP64F-NEXT: ret
 %z = fadd <4 x float> %x, %y
 ret <4 x float> %z
diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -144,6 +144,7 @@
 ; RV32IFD-NEXT: mv a0, a1
 ; RV32IFD-NEXT: .LBB5_2:
 ; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT: ret
 ;
 ; RV64IFD-LABEL: fcvt_wu_d_multiple_use:
@@ -155,6 +156,7 @@
 ; RV64IFD-NEXT: # %bb.1:
 ; RV64IFD-NEXT: mv a0, a1
 ; RV64IFD-NEXT: .LBB5_2:
+; RV64IFD-NEXT: .cfi_def_cfa_offset 0
 ; RV64IFD-NEXT: ret
 %a = fptoui double %x to i32
 %b = icmp eq i32 %a, 0
@@ -639,6 +641,7 @@
 ; RV32IFD-NEXT: addi a0, a0, 1
 ; RV32IFD-NEXT: fcvt.d.w ft0, a0
 ; RV32IFD-NEXT: fsd ft0, 0(a1)
+; RV32IFD-NEXT: .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT: ret
 ;
 ; RV64IFD-LABEL: fcvt_d_w_demanded_bits:
@@ -646,6 +649,7 @@
 ; RV64IFD-NEXT: addiw a0, a0, 1
 ; RV64IFD-NEXT: fcvt.d.w ft0, a0
 ; RV64IFD-NEXT: fsd ft0, 0(a1)
+; RV64IFD-NEXT: .cfi_def_cfa_offset 0
 ; RV64IFD-NEXT: ret
 %3 = add i32 %0, 1
 %4 = sitofp i32 %3 to double
@@ -660,6 +664,7 @@
 ; RV32IFD-NEXT: addi a0, a0, 1
 ; RV32IFD-NEXT: fcvt.d.wu ft0, a0
 ; RV32IFD-NEXT: fsd ft0, 0(a1)
+; RV32IFD-NEXT: .cfi_def_cfa_offset 0
 ; RV32IFD-NEXT: ret
 ;
 ; RV64IFD-LABEL: fcvt_d_wu_demanded_bits:
@@ -667,6 +672,7 @@
 ; RV64IFD-NEXT: addiw a0, a0, 1
 ; RV64IFD-NEXT: fcvt.d.wu ft0, a0
 ; RV64IFD-NEXT: fsd ft0, 0(a1)
+; RV64IFD-NEXT: .cfi_def_cfa_offset 0
 ; RV64IFD-NEXT: ret
 %3 = add i32 %0, 1
 %4 = uitofp i32 %3 to double
diff --git a/llvm/test/CodeGen/RISCV/exception-pointer-register.ll b/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
--- a/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
+++ b/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
@@ -40,7 +40,11 @@
 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: .cfi_restore ra
+; RV32I-NEXT: .cfi_restore s0
+; RV32I-NEXT: .cfi_restore s1
 ; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ; RV32I-NEXT: .LBB0_4: # %lpad
 ; RV32I-NEXT: .Ltmp4:
@@ -77,7 +81,11 @@
 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT: .cfi_restore ra
+; RV64I-NEXT: .cfi_restore s0
+; RV64I-NEXT: .cfi_restore s1
 ; RV64I-NEXT: addi sp, sp, 32
+; RV64I-NEXT: .cfi_def_cfa_offset 0
 ; RV64I-NEXT: ret
 ; RV64I-NEXT: .LBB0_4: # %lpad
 ; RV64I-NEXT: .Ltmp4:
@@ -111,10 +119,12 @@
 define internal void @callee(i1* %p) {
 ; RV32I-LABEL: callee:
 ; RV32I: # %bb.0:
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: callee:
 ; RV64I: # %bb.0:
+; RV64I-NEXT: .cfi_def_cfa_offset 0
 ; RV64I-NEXT: ret
 ret void
 }
diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -82,6 +82,7 @@
 ; RV32IF-NEXT: # %bb.1:
 ; RV32IF-NEXT: mv a0, a1
 ; RV32IF-NEXT: .LBB3_2:
+; RV32IF-NEXT: .cfi_def_cfa_offset 0
 ; RV32IF-NEXT: ret
 ;
 ; RV64IF-LABEL: fcvt_wu_s_multiple_use:
@@ -93,6 +94,7 @@
 ; RV64IF-NEXT: # %bb.1:
 ; RV64IF-NEXT: mv a0, a1
 ; RV64IF-NEXT: .LBB3_2:
+; RV64IF-NEXT: .cfi_def_cfa_offset 0
 ; RV64IF-NEXT: ret
 %a = fptoui float %x to i32
 %b = icmp eq i32 %a, 0
@@ -526,6 +528,7 @@
 ; RV32IF-NEXT: addi a0, a0, 1
 ; RV32IF-NEXT: fcvt.s.w ft0, a0
 ; RV32IF-NEXT: fsw ft0, 0(a1)
+; RV32IF-NEXT: .cfi_def_cfa_offset 0
 ; RV32IF-NEXT: ret
 ;
 ; RV64IF-LABEL: fcvt_s_w_demanded_bits:
@@ -533,6 +536,7 @@
 ; RV64IF-NEXT: addiw a0, a0, 1
 ; RV64IF-NEXT: fcvt.s.w ft0, a0
 ; RV64IF-NEXT: fsw ft0, 0(a1)
+; RV64IF-NEXT: .cfi_def_cfa_offset 0
 ; RV64IF-NEXT: ret
 %3 = add i32 %0, 1
 %4 = sitofp i32 %3 to float
@@ -547,6 +551,7 @@
 ; RV32IF-NEXT: addi a0, a0, 1
 ; RV32IF-NEXT: fcvt.s.wu ft0, a0
 ; RV32IF-NEXT: fsw ft0, 0(a1)
+; RV32IF-NEXT: .cfi_def_cfa_offset 0
 ; RV32IF-NEXT: ret
 ;
 ; RV64IF-LABEL: fcvt_s_wu_demanded_bits:
@@ -554,6 +559,7 @@
 ; RV64IF-NEXT: addiw a0, a0, 1
 ; RV64IF-NEXT: fcvt.s.wu ft0, a0
 ; RV64IF-NEXT: fsw ft0, 0(a1)
+; RV64IF-NEXT: .cfi_def_cfa_offset 0
 ; RV64IF-NEXT: ret
 %3 = add i32 %0, 1
 %4 = uitofp i32 %3 to float
diff --git a/llvm/test/CodeGen/RISCV/fpenv.ll b/llvm/test/CodeGen/RISCV/fpenv.ll
--- a/llvm/test/CodeGen/RISCV/fpenv.ll
+++ b/llvm/test/CodeGen/RISCV/fpenv.ll
@@ -11,6 +11,7 @@
 ; RV32IF-NEXT: addi a1, a1, 769
 ; RV32IF-NEXT: srl a0, a1, a0
 ; RV32IF-NEXT: andi a0, a0, 7
+; RV32IF-NEXT: .cfi_def_cfa_offset 0
 ; RV32IF-NEXT: ret
 ;
 ; RV64IF-LABEL: func_01:
@@ -21,6 +22,7 @@
 ; RV64IF-NEXT: addiw a1, a1, 769
 ; RV64IF-NEXT: srl a0, a1, a0
 ; RV64IF-NEXT: andi a0, a0, 7
+; RV64IF-NEXT: .cfi_def_cfa_offset 0
 ; RV64IF-NEXT: ret
 %rm = call i32 @llvm.flt.rounds()
 ret i32 %rm
@@ -35,6 +37,7 @@
 ; RV32IF-NEXT: srl a0, a1, a0
 ; RV32IF-NEXT: andi a0, a0, 7
 ; RV32IF-NEXT: fsrm a0
+; RV32IF-NEXT: .cfi_def_cfa_offset 0
 ; RV32IF-NEXT: ret
 ;
 ; RV64IF-LABEL: func_02:
@@ -46,6 +49,7 @@
 ; RV64IF-NEXT: srl a0, a1, a0
 ; RV64IF-NEXT: andi a0, a0, 7
 ; RV64IF-NEXT: fsrm a0
+; RV64IF-NEXT: .cfi_def_cfa_offset 0
 ; RV64IF-NEXT: ret
 call void @llvm.set.rounding(i32 %rm)
 ret void
@@ -55,11 +59,13 @@
 ; RV32IF-LABEL: func_03:
 ; RV32IF: # %bb.0:
 ; RV32IF-NEXT: fsrmi 1
+; RV32IF-NEXT: .cfi_def_cfa_offset 0
 ; RV32IF-NEXT: ret
 ;
 ; RV64IF-LABEL: func_03:
 ; RV64IF: # %bb.0:
 ; RV64IF-NEXT: fsrmi 1
+; RV64IF-NEXT: .cfi_def_cfa_offset 0
 ; RV64IF-NEXT: ret
 call void @llvm.set.rounding(i32 0)
 ret void
@@ -69,11 +75,13 @@
 ; RV32IF-LABEL: func_04:
 ; RV32IF: # %bb.0:
 ; RV32IF-NEXT: fsrmi 0
+; RV32IF-NEXT: .cfi_def_cfa_offset 0
 ; RV32IF-NEXT: ret
 ;
 ; RV64IF-LABEL: func_04:
 ; RV64IF: # %bb.0:
 ; RV64IF-NEXT: fsrmi 0
+; RV64IF-NEXT: .cfi_def_cfa_offset 0
 ; RV64IF-NEXT: ret
 call void @llvm.set.rounding(i32 1)
 ret void
@@ -83,11 +91,13 @@
 ; RV32IF-LABEL: func_05:
 ; RV32IF: # %bb.0:
 ; RV32IF-NEXT: fsrmi 3
+; RV32IF-NEXT: .cfi_def_cfa_offset 0
 ; RV32IF-NEXT: ret
 ;
 ; RV64IF-LABEL: func_05:
 ; RV64IF: # %bb.0:
 ; RV64IF-NEXT: fsrmi 3
+; RV64IF-NEXT: .cfi_def_cfa_offset 0
 ; RV64IF-NEXT: ret
 call void @llvm.set.rounding(i32 2)
 ret void
@@ -97,11 +107,13 @@
 ; RV32IF-LABEL: func_06:
 ; RV32IF: # %bb.0:
 ; RV32IF-NEXT: fsrmi 2
+; RV32IF-NEXT: .cfi_def_cfa_offset 0
 ; RV32IF-NEXT: ret
 ;
 ; RV64IF-LABEL: func_06:
 ; RV64IF: # %bb.0:
 ; RV64IF-NEXT: fsrmi 2
+; RV64IF-NEXT: .cfi_def_cfa_offset 0
 ; RV64IF-NEXT: ret
 call void @llvm.set.rounding(i32 3)
 ret void
@@ -111,11 +123,13 @@
 ; RV32IF-LABEL: func_07:
 ; RV32IF: # %bb.0:
 ; RV32IF-NEXT: fsrmi 4
+; RV32IF-NEXT: .cfi_def_cfa_offset 0
 ; RV32IF-NEXT: ret
 ;
 ; RV64IF-LABEL: func_07:
 ; RV64IF: # %bb.0:
 ; RV64IF-NEXT: fsrmi 4
+; RV64IF-NEXT: .cfi_def_cfa_offset 0
 ; RV64IF-NEXT: ret
 call void @llvm.set.rounding(i32 4)
 ret void
diff --git a/llvm/test/CodeGen/RISCV/frame-info.ll b/llvm/test/CodeGen/RISCV/frame-info.ll
--- a/llvm/test/CodeGen/RISCV/frame-info.ll
+++ b/llvm/test/CodeGen/RISCV/frame-info.ll
@@ -11,10 +11,12 @@
 define void @trivial() {
 ; RV32-LABEL: trivial:
 ; RV32: # %bb.0:
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: trivial:
 ; RV64: # %bb.0:
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ;
 ; RV32-WITHFP-LABEL: trivial:
@@ -28,12 +30,13 @@
 ; RV32-WITHFP-NEXT: addi s0, sp, 16
 ; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 0
 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: .cfi_def_cfa sp, 16
 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: .cfi_restore ra
+; RV32-WITHFP-NEXT: .cfi_restore s0
 ; RV32-WITHFP-NEXT: addi sp, sp, 16
+; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 0
 ; RV32-WITHFP-NEXT: ret
-;
-; RV64-WITHFP-LABEL: trivial:
-; RV64-WITHFP: # %bb.0:
 ; RV64-WITHFP-NEXT: addi sp, sp, -16
 ; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 16
 ; RV64-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
@@ -63,12 +66,16 @@
 ; RV32-NEXT: addi a0, a0, 15
 ; RV32-NEXT: andi a0, a0, -16
 ; RV32-NEXT: sub a0, sp, a0
-; RV32-NEXT: mv sp, a0
+; RV32-NEXT: mv sp, a0
 ; RV32-NEXT: call callee_with_args@plt
 ; RV32-NEXT: addi sp, s0, -16
 ; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_def_cfa sp, 16
 ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: .cfi_restore s0
 ; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: stack_alloc:
@@ -90,8 +97,12 @@
 ; RV64-NEXT: call callee_with_args@plt
 ; RV64-NEXT: addi sp, s0, -16
 ; RV64-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_def_cfa sp, 16
 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: .cfi_restore s0
 ; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ;
 ; RV32-WITHFP-LABEL: stack_alloc:
@@ -111,8 +122,12 @@
 ; RV32-WITHFP-NEXT: call callee_with_args@plt
 ; RV32-WITHFP-NEXT: addi sp, s0, -16
 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: .cfi_def_cfa sp, 16
 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: .cfi_restore ra
+; RV32-WITHFP-NEXT: .cfi_restore s0
 ; RV32-WITHFP-NEXT: addi sp, sp, 16
+; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 0
 ; RV32-WITHFP-NEXT: ret
 ;
 ; RV64-WITHFP-LABEL: stack_alloc:
@@ -157,7 +172,9 @@
 ; RV32-NEXT: .cfi_offset ra, -4
 ; RV32-NEXT: call callee2@plt
 ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
 ; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: branch_and_tail_call:
@@ -173,7 +190,9 @@
 ; RV64-NEXT: .cfi_offset ra, -8
 ; RV64-NEXT: call callee2@plt
 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
 ; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ;
 ; RV32-WITHFP-LABEL: branch_and_tail_call:
@@ -193,8 +212,12 @@
 ; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 0
 ; RV32-WITHFP-NEXT: call callee2@plt
 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: .cfi_def_cfa sp, 16
 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: .cfi_restore ra
+; RV32-WITHFP-NEXT: .cfi_restore s0
 ; RV32-WITHFP-NEXT: addi sp, sp, 16
+; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 0
 ; RV32-WITHFP-NEXT: ret
 ;
 ; RV64-WITHFP-LABEL: branch_and_tail_call:
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -146,6 +146,7 @@
 ; RV32IZFH-NEXT: # %bb.1:
 ; RV32IZFH-NEXT: mv a0, a1
 ; RV32IZFH-NEXT: .LBB3_2:
+; RV32IZFH-NEXT: .cfi_def_cfa_offset 0
 ; RV32IZFH-NEXT: ret
 ;
 ; RV32IDZFH-LABEL: fcvt_ui_h_multiple_use:
@@ -156,6 +157,7 @@
 ; RV32IDZFH-NEXT: # %bb.1:
 ; RV32IDZFH-NEXT: mv a0, a1
 ; RV32IDZFH-NEXT: .LBB3_2:
+; RV32IDZFH-NEXT: .cfi_def_cfa_offset 0
 ; RV32IDZFH-NEXT: ret
 ;
 ; RV64IZFH-LABEL: fcvt_ui_h_multiple_use:
@@ -166,6 +168,7 @@
 ; RV64IZFH-NEXT: # %bb.1:
 ; RV64IZFH-NEXT: mv a0, a1
 ; RV64IZFH-NEXT: .LBB3_2:
+; RV64IZFH-NEXT: .cfi_def_cfa_offset 0
 ; RV64IZFH-NEXT: ret
 ;
 ; RV64IDZFH-LABEL: fcvt_ui_h_multiple_use:
@@ -176,6 +179,7 @@
 ; RV64IDZFH-NEXT: # %bb.1:
 ; RV64IDZFH-NEXT: mv a0, a1
 ; RV64IDZFH-NEXT: .LBB3_2:
+; RV64IDZFH-NEXT: .cfi_def_cfa_offset 0
 ; RV64IDZFH-NEXT: ret
 %a = fptoui half %x to i32
 %b = icmp eq i32 %a, 0
@@ -1154,6 +1158,7 @@
 ; RV32IZFH-NEXT: addi a0, a0, 1
 ; RV32IZFH-NEXT: fcvt.h.w ft0, a0
 ; RV32IZFH-NEXT: fsh ft0, 0(a1)
+; RV32IZFH-NEXT: .cfi_def_cfa_offset 0
 ; RV32IZFH-NEXT: ret
 ;
 ; RV32IDZFH-LABEL: fcvt_h_w_demanded_bits:
@@ -1161,6 +1166,7 @@
 ; RV32IDZFH-NEXT: addi a0, a0, 1
 ; RV32IDZFH-NEXT: fcvt.h.w ft0, a0
 ; RV32IDZFH-NEXT: fsh ft0, 0(a1)
+; RV32IDZFH-NEXT: .cfi_def_cfa_offset 0
 ; RV32IDZFH-NEXT: ret
 ;
 ; RV64IZFH-LABEL: fcvt_h_w_demanded_bits:
@@ -1168,6 +1174,7 @@
 ; RV64IZFH-NEXT: addiw a0, a0, 1
 ; RV64IZFH-NEXT: fcvt.h.w ft0, a0
 ; RV64IZFH-NEXT: fsh ft0, 0(a1)
+; RV64IZFH-NEXT: .cfi_def_cfa_offset 0
 ; RV64IZFH-NEXT: ret
 ;
 ; RV64IDZFH-LABEL: fcvt_h_w_demanded_bits:
@@ -1175,6 +1182,7 @@
 ; RV64IDZFH-NEXT: addiw a0, a0, 1
 ; RV64IDZFH-NEXT: fcvt.h.w ft0, a0
 ; RV64IDZFH-NEXT: fsh ft0, 0(a1)
+; RV64IDZFH-NEXT: .cfi_def_cfa_offset 0
 ; RV64IDZFH-NEXT: ret
 %3 = add i32 %0, 1
 %4 = sitofp i32 %3 to half
@@ -1189,6 +1197,7 @@
 ; RV32IZFH-NEXT: addi a0, a0, 1
 ; RV32IZFH-NEXT: fcvt.h.wu ft0, a0
 ; RV32IZFH-NEXT: fsh ft0, 0(a1)
+; RV32IZFH-NEXT: .cfi_def_cfa_offset 0
 ; RV32IZFH-NEXT: ret
 ;
 ; RV32IDZFH-LABEL: fcvt_h_wu_demanded_bits:
@@ -1196,6 +1205,7 @@
 ; RV32IDZFH-NEXT: addi a0, a0, 1
 ; RV32IDZFH-NEXT: fcvt.h.wu ft0, a0
 ; RV32IDZFH-NEXT: fsh ft0, 0(a1)
+; RV32IDZFH-NEXT: .cfi_def_cfa_offset 0
 ; RV32IDZFH-NEXT: ret
 ;
 ; RV64IZFH-LABEL: fcvt_h_wu_demanded_bits:
@@ -1203,6 +1213,7 @@
 ; RV64IZFH-NEXT: addiw a0, a0, 1
 ; RV64IZFH-NEXT: fcvt.h.wu ft0, a0
 ; RV64IZFH-NEXT: fsh ft0, 0(a1)
+; RV64IZFH-NEXT: .cfi_def_cfa_offset 0
 ; RV64IZFH-NEXT: ret
 ;
 ; RV64IDZFH-LABEL: fcvt_h_wu_demanded_bits:
@@ -1210,6 +1221,7 @@
 ; RV64IDZFH-NEXT: addiw a0, a0, 1
 ; RV64IDZFH-NEXT: fcvt.h.wu ft0, a0
 ; RV64IDZFH-NEXT: fsh ft0, 0(a1)
+; RV64IDZFH-NEXT: .cfi_def_cfa_offset 0
 ; RV64IDZFH-NEXT: ret
 %3 = add i32 %0, 1
 %4 = uitofp i32 %3 to half
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-S-constraint.ll b/llvm/test/CodeGen/RISCV/inline-asm-S-constraint.ll
--- a/llvm/test/CodeGen/RISCV/inline-asm-S-constraint.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-S-constraint.ll
@@ -11,6 +11,7 @@
 ; RV32-NEXT: lui a0, %hi(var)
 ; RV32-NEXT: addi a0, a0, %lo(var)
 ; RV32-NEXT: #NO_APP
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: constraint_S:
@@ -19,6 +20,7 @@
 ; RV64-NEXT: lui a0, %hi(var)
 ; RV64-NEXT: addi a0, a0, %lo(var)
 ; RV64-NEXT: #NO_APP
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 %ret = tail call i8* asm "lui $0, %hi($1)\0Aaddi $0, $0, %lo($1)", "=r,S"(i32* nonnull @var)
 ret i8* %ret
@@ -34,6 +36,7 @@
 ; RV32-NEXT: lui a0, %hi(.Ltmp0)
 ; RV32-NEXT: addi a0, a0, %lo(.Ltmp0)
 ; RV32-NEXT: #NO_APP
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: constraint_S_label:
@@ -44,6 +47,7 @@
 ; RV64-NEXT: lui a0, %hi(.Ltmp0)
 ; RV64-NEXT: addi a0, a0, %lo(.Ltmp0)
 ; RV64-NEXT: #NO_APP
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 entry:
 br label %L1
diff --git a/llvm/test/CodeGen/RISCV/large-stack.ll b/llvm/test/CodeGen/RISCV/large-stack.ll
--- a/llvm/test/CodeGen/RISCV/large-stack.ll
+++ b/llvm/test/CodeGen/RISCV/large-stack.ll
@@ -16,6 +16,7 @@
 ; RV32I-FPELIM-NEXT: lui a0, 74565
 ; RV32I-FPELIM-NEXT: addi a0, a0, 1664
 ; RV32I-FPELIM-NEXT: add sp, sp, a0
+; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-FPELIM-NEXT: ret
 ;
 ; RV32I-WITHFP-LABEL: test:
@@ -35,8 +36,12 @@
 ; RV32I-WITHFP-NEXT: addi a0, a0, -352
 ; RV32I-WITHFP-NEXT: add sp, sp, a0
 ; RV32I-WITHFP-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT: .cfi_def_cfa sp, 2032
 ; RV32I-WITHFP-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT: .cfi_restore ra
+; RV32I-WITHFP-NEXT: .cfi_restore s0
 ; RV32I-WITHFP-NEXT: addi sp, sp, 2032
+; RV32I-WITHFP-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-WITHFP-NEXT: ret
 %tmp = alloca [ 305419896 x i8 ] , align 4
 ret void
@@ -74,9 +79,13 @@
 ; RV32I-FPELIM-NEXT: lui a0, 97
 ; RV32I-FPELIM-NEXT: addi a0, a0, 672
 ; RV32I-FPELIM-NEXT: add sp, sp, a0
+; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset -2032
 ; RV32I-FPELIM-NEXT: lw s1, 2024(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT: lw s0, 2028(sp) # 4-byte Folded Reload
+; RV32I-FPELIM-NEXT: .cfi_restore s0
+; RV32I-FPELIM-NEXT: .cfi_restore s1
 ; RV32I-FPELIM-NEXT: addi sp, sp, 2032
+; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-FPELIM-NEXT: ret
 ;
 ; RV32I-WITHFP-LABEL: test_emergency_spill_slot:
@@ -117,8 +126,14 @@
 ; RV32I-WITHFP-NEXT: lw s2, 2016(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT: lw s1, 2020(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT: .cfi_def_cfa sp, 2032
 ; RV32I-WITHFP-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32I-WITHFP-NEXT: .cfi_restore ra
+; RV32I-WITHFP-NEXT: .cfi_restore s0
+; RV32I-WITHFP-NEXT: .cfi_restore s1
+; RV32I-WITHFP-NEXT: .cfi_restore s2
 ; RV32I-WITHFP-NEXT: addi sp, sp, 2032
+; RV32I-WITHFP-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-WITHFP-NEXT: ret
 %data = alloca [ 100000 x i32 ] , align 4
 %ptr = getelementptr inbounds [100000 x i32], [100000 x i32]* %data, i32 0, i32 80000
diff --git a/llvm/test/CodeGen/RISCV/neg-abs.ll b/llvm/test/CodeGen/RISCV/neg-abs.ll
--- a/llvm/test/CodeGen/RISCV/neg-abs.ll
+++ b/llvm/test/CodeGen/RISCV/neg-abs.ll
@@ -17,6 +17,7 @@
 ; RV32I-NEXT: srai a1, a0, 31
 ; RV32I-NEXT: xor a0, a0, a1
 ; RV32I-NEXT: sub a0, a1, a0
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32IBT-LABEL: neg_abs32:
@@ -24,6 +25,7 @@
 ; RV32IBT-NEXT: srai a1, a0, 31
 ; RV32IBT-NEXT: xor a0, a0, a1
 ; RV32IBT-NEXT: sub a0, a1, a0
+; RV32IBT-NEXT: .cfi_def_cfa_offset 0
 ; RV32IBT-NEXT: ret
 ;
 ; RV64I-LABEL: neg_abs32:
@@ -31,6 +33,7 @@
 ; RV64I-NEXT: sraiw a1, a0, 31
 ; RV64I-NEXT: xor a0, a0, a1
 ; RV64I-NEXT: subw a0, a1, a0
+; RV64I-NEXT: .cfi_def_cfa_offset 0
 ; RV64I-NEXT: ret
 ;
 ; RV64IBT-LABEL: neg_abs32:
@@ -38,6 +41,7 @@
 ; RV64IBT-NEXT: sraiw a1, a0, 31
 ; RV64IBT-NEXT: xor a0, a0, a1
 ; RV64IBT-NEXT: subw a0, a1, a0
+; RV64IBT-NEXT: .cfi_def_cfa_offset 0
 ; RV64IBT-NEXT: ret
 %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
 %neg = sub nsw i32 0, %abs
@@ -50,6 +54,7 @@
 ; RV32I-NEXT: srai a1, a0, 31
 ; RV32I-NEXT: xor a0, a0, a1
 ; RV32I-NEXT: sub a0, a1, a0
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32IBT-LABEL: select_neg_abs32:
@@ -57,6 +62,7 @@
 ; RV32IBT-NEXT: srai a1, a0, 31
 ; RV32IBT-NEXT: xor a0, a0, a1
 ; RV32IBT-NEXT: sub a0, a1, a0
+; RV32IBT-NEXT: .cfi_def_cfa_offset 0
 ; RV32IBT-NEXT: ret
 ;
 ; RV64I-LABEL: select_neg_abs32:
@@ -64,6 +70,7 @@
 ; RV64I-NEXT: sraiw a1, a0, 31
 ; RV64I-NEXT: xor a0, a0, a1
 ; RV64I-NEXT: subw a0, a1, a0
+; RV64I-NEXT: .cfi_def_cfa_offset 0
 ; RV64I-NEXT: ret
 ;
 ; RV64IBT-LABEL: select_neg_abs32:
@@ -71,6 +78,7 @@
 ; RV64IBT-NEXT: sraiw a1, a0, 31
 ; RV64IBT-NEXT: xor a0, a0, a1
 ; RV64IBT-NEXT: subw a0, a1, a0
+; RV64IBT-NEXT: .cfi_def_cfa_offset 0
 ; RV64IBT-NEXT: ret
 %1 = icmp slt i32 %x, 0
 %2 = sub nsw i32 0, %x
@@ -88,6 +96,7 @@
 ; RV32I-NEXT: sub a1, a2, a1
 ; RV32I-NEXT: sub a1, a1, a3
 ; RV32I-NEXT: sub a0, a2, a0
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32IBT-LABEL: neg_abs64:
@@ -99,6 +108,7 @@
 ; RV32IBT-NEXT: sub a1, a2, a1
 ; RV32IBT-NEXT: sub a1, a1, a3
 ; RV32IBT-NEXT: sub a0, a2, a0
+; RV32IBT-NEXT: .cfi_def_cfa_offset 0
 ; RV32IBT-NEXT: ret
 ;
 ; RV64I-LABEL: neg_abs64:
@@ -106,6 +116,7 @@
 ; RV64I-NEXT: srai a1, a0, 63
 ; RV64I-NEXT: xor a0, a0, a1
 ; RV64I-NEXT: sub a0, a1, a0
+; RV64I-NEXT: .cfi_def_cfa_offset 0
 ; RV64I-NEXT: ret
 ;
 ; RV64IBT-LABEL: neg_abs64:
@@ -113,6 +124,7 @@
 ; RV64IBT-NEXT: srai a1, a0, 63
 ; RV64IBT-NEXT: xor a0, a0, a1
 ; RV64IBT-NEXT: sub a0, a1, a0
+; RV64IBT-NEXT: .cfi_def_cfa_offset 0
 ; RV64IBT-NEXT: ret
 %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
 %neg = sub nsw i64 0, %abs
@@ -129,6 +141,7 @@
 ; RV32I-NEXT: sub a1, a2, a1
 ; RV32I-NEXT: sub a1, a1, a3
 ; RV32I-NEXT: sub a0, a2, a0
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32IBT-LABEL: select_neg_abs64:
@@ -140,6 +153,7 @@
 ; RV32IBT-NEXT: sub a1, a2, a1
 ; RV32IBT-NEXT: sub a1, a1, a3
 ; RV32IBT-NEXT: sub a0, a2, a0
+; RV32IBT-NEXT: .cfi_def_cfa_offset 0
 ; RV32IBT-NEXT: ret
 ;
 ; RV64I-LABEL: select_neg_abs64:
@@ -147,6 +161,7 @@
 ; RV64I-NEXT: srai a1, a0, 63
 ; RV64I-NEXT: xor a0, a0, a1
 ; RV64I-NEXT: sub a0, a1, a0
+; RV64I-NEXT: .cfi_def_cfa_offset 0
 ; RV64I-NEXT: ret
 ;
 ; RV64IBT-LABEL: select_neg_abs64:
@@ -154,6 +169,7 @@
 ; RV64IBT-NEXT: srai a1, a0, 63
 ; RV64IBT-NEXT: xor a0, a0, a1
 ; RV64IBT-NEXT: sub a0, a1, a0
+; RV64IBT-NEXT: .cfi_def_cfa_offset 0
 ; RV64IBT-NEXT: ret
 %1 = icmp slt i64 %x, 0
 %2 = sub nsw i64 0, %x
diff --git a/llvm/test/CodeGen/RISCV/patchable-function-entry.ll b/llvm/test/CodeGen/RISCV/patchable-function-entry.ll
--- a/llvm/test/CodeGen/RISCV/patchable-function-entry.ll
+++ b/llvm/test/CodeGen/RISCV/patchable-function-entry.ll
@@ -18,8 +18,10 @@
 ; CHECK-LABEL: f1:
 ; CHECK-NEXT: .Lfunc_begin1:
 ; NORVC: addi zero, zero, 0
+; NORVC-NEXT: .cfi_def_cfa_offset 0
 ; NORVC-NEXT: jalr zero, 0(ra)
 ; RVC: c.nop
+; RVC-NEXT: .cfi_def_cfa_offset 0
 ; RVC-NEXT: c.jr ra
 ; CHECK: .section __patchable_function_entries,"awo",@progbits,f1{{$}}
 ; 32: .p2align 2
@@ -34,8 +36,10 @@
 ; CHECK-LABEL: f5:
 ; CHECK-NEXT: .Lfunc_begin2:
 ; NORVC-COUNT-5: addi zero, zero, 0
+; NORVC-NEXT: .cfi_def_cfa_offset 0
 ; NORVC-NEXT: jalr zero, 0(ra)
 ; RVC-COUNT-5: c.nop
+; RVC-NEXT: .cfi_def_cfa_offset 0
 ; RVC-NEXT: c.jr ra
 ; CHECK: .section __patchable_function_entries,"aGwo",@progbits,f5,comdat,f5{{$}}
 ; RV32: .p2align 2
diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll
--- a/llvm/test/CodeGen/RISCV/rv32zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -10,12 +10,14 @@
 ; RV32I-NEXT: slli a0, a0, 1
 ; RV32I-NEXT: add a0, a2, a0
 ; RV32I-NEXT: lh a0, 0(a0)
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: sh1add:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh1add a0, a0, a2
 ; RV32ZBA-NEXT: lh a0, 0(a0)
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %3 = getelementptr inbounds i16, i16* %1, i64 %0
 %4 = load i16, i16* %3
@@ -28,12 +30,14 @@
 ; RV32I-NEXT: slli a0, a0, 2
 ; RV32I-NEXT: add a0, a2, a0
 ; RV32I-NEXT: lw a0, 0(a0)
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: sh2add:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh2add a0, a0, a2
 ; RV32ZBA-NEXT: lw a0, 0(a0)
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %3 = getelementptr inbounds i32, i32* %1, i64 %0
 %4 = load i32, i32* %3
@@ -47,6 +51,7 @@
 ; RV32I-NEXT: add a1, a2, a0
 ; RV32I-NEXT: lw a0, 0(a1)
 ; RV32I-NEXT: lw a1, 4(a1)
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: sh3add:
@@ -54,6 +59,7 @@
 ; RV32ZBA-NEXT: sh3add a1, a0, a2
 ; RV32ZBA-NEXT: lw a0, 0(a1)
 ; RV32ZBA-NEXT: lw a1, 4(a1)
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %3 = getelementptr inbounds i64, i64* %1, i64 %0
 %4 = load i64, i64* %3
@@ -66,12 +72,14 @@
 ; RV32I-NEXT: addi a2, zero, 6
 ; RV32I-NEXT: mul a0, a0, a2
 ; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: addmul6:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh1add a0, a0, a0
 ; RV32ZBA-NEXT: sh1add a0, a0, a1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 6
 %d = add i32 %c, %b
@@ -84,12 +92,14 @@
 ; RV32I-NEXT: addi a2, zero, 10
 ; RV32I-NEXT: mul a0, a0, a2
 ; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: addmul10:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh2add a0, a0, a0
 ; RV32ZBA-NEXT: sh1add a0, a0, a1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 10
 %d = add i32 %c, %b
@@ -102,12 +112,14 @@
 ; RV32I-NEXT: addi a2, zero, 12
 ; RV32I-NEXT: mul a0, a0, a2
 ; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: addmul12:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh1add a0, a0, a0
 ; RV32ZBA-NEXT: sh2add a0, a0, a1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 12
 %d = add i32 %c, %b
@@ -120,12 +132,14 @@
 ; RV32I-NEXT: addi a2, zero, 18
 ; RV32I-NEXT: mul a0, a0, a2
 ; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: addmul18:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh3add a0, a0, a0
 ; RV32ZBA-NEXT: sh1add a0, a0, a1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 18
 %d = add i32 %c, %b
@@ -138,12 +152,14 @@
 ; RV32I-NEXT: addi a2, zero, 20
 ; RV32I-NEXT: mul a0, a0, a2
 ; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: addmul20:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh2add a0, a0, a0
 ; RV32ZBA-NEXT: sh2add a0, a0, a1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 20
 %d = add i32 %c, %b
@@ -156,12 +172,14 @@
 ; RV32I-NEXT: addi a2, zero, 24
 ; RV32I-NEXT: mul a0, a0, a2
 ; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: addmul24:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh1add a0, a0, a0
 ; RV32ZBA-NEXT: sh3add a0, a0, a1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 24
 %d = add i32 %c, %b
@@ -174,12 +192,14 @@
 ; RV32I-NEXT: addi a2, zero, 36
 ; RV32I-NEXT: mul a0, a0, a2
 ; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: addmul36:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh3add a0, a0, a0
 ; RV32ZBA-NEXT: sh2add a0, a0, a1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 36
 %d = add i32 %c, %b
@@ -192,12 +212,14 @@
 ; RV32I-NEXT: addi a2, zero, 40
 ; RV32I-NEXT: mul a0, a0, a2
 ; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: addmul40:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh2add a0, a0, a0
 ; RV32ZBA-NEXT: sh3add a0, a0, a1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 40
 %d = add i32 %c, %b
@@ -210,12 +232,14 @@
 ; RV32I-NEXT: addi a2, zero, 72
 ; RV32I-NEXT: mul a0, a0, a2
 ; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: addmul72:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh3add a0, a0, a0
 ; RV32ZBA-NEXT: sh3add a0, a0, a1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 72
 %d = add i32 %c, %b
@@ -227,12 +251,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 96
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul96:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh1add a0, a0, a0
 ; RV32ZBA-NEXT: slli a0, a0, 5
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 96
 ret i32 %c
@@ -243,12 +269,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 160
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul160:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh2add a0, a0, a0
 ; RV32ZBA-NEXT: slli a0, a0, 5
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 160
 ret i32 %c
@@ -259,12 +287,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 288
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul288:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh3add a0, a0, a0
 ; RV32ZBA-NEXT: slli a0, a0, 5
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 288
 ret i32 %c
@@ -275,12 +305,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 258
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul258:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: addi a1, zero, 258
 ; RV32ZBA-NEXT: mul a0, a0, a1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 258
 ret i32 %c
@@ -291,12 +323,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 260
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul260:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: addi a1, zero, 260
 ; RV32ZBA-NEXT: mul a0, a0, a1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 260
 ret i32 %c
@@ -307,12 +341,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 264
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul264:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: addi a1, zero, 264
 ; RV32ZBA-NEXT: mul a0, a0, a1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 264
 ret i32 %c
@@ -323,12 +359,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 11
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul11:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh2add a1, a0, a0
 ; RV32ZBA-NEXT: sh1add a0, a1, a0
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 11
 ret i32 %c
@@ -339,12 +377,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 19
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul19:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh3add a1, a0, a0
 ; RV32ZBA-NEXT: sh1add a0, a1, a0
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 19
 ret i32 %c
@@ -355,12 +395,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 13
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul13:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh1add a1, a0, a0
 ; RV32ZBA-NEXT: sh2add a0, a1, a0
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 13
 ret i32 %c
@@ -371,12 +413,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 21
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul21:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh2add a1, a0, a0
 ; RV32ZBA-NEXT: sh2add a0, a1, a0
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 21
 ret i32 %c
@@ -387,12 +431,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 37
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul37:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh3add a1, a0, a0
 ; RV32ZBA-NEXT: sh2add a0, a1, a0
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 37
 ret i32 %c
@@ -403,12 +449,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 25
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul25:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh1add a1, a0, a0
 ; RV32ZBA-NEXT: sh3add a0, a1, a0
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 25
 ret i32 %c
@@ -419,12 +467,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 41
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul41:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh2add a1, a0, a0
 ; RV32ZBA-NEXT: sh3add a0, a1, a0
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 41
 ret i32 %c
@@ -435,12 +485,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 73
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul73:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh3add a1, a0, a0
 ; RV32ZBA-NEXT: sh3add a0, a1, a0
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 73
 ret i32 %c
@@ -451,12 +503,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 27
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul27:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh3add a0, a0, a0
 ; RV32ZBA-NEXT: sh1add a0, a0, a0
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 27
 ret i32 %c
@@ -467,12 +521,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 45
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul45:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh3add a0, a0, a0
 ; RV32ZBA-NEXT: sh2add a0, a0, a0
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 45
 ret i32 %c
@@ -483,12 +539,14 @@
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 81
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul81:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh3add a0, a0, a0
 ; RV32ZBA-NEXT: sh3add a0, a0, a0
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 81
 ret i32 %c
@@ -500,12 +558,14 @@
 ; RV32I-NEXT: lui a1, 1
 ; RV32I-NEXT: addi a1, a1, 2
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul4098:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: slli a1, a0, 12
 ; RV32ZBA-NEXT: sh1add a0, a0, a1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 4098
 ret i32 %c
@@ -517,12 +577,14 @@
 ; RV32I-NEXT: lui a1, 1
 ; RV32I-NEXT: addi a1, a1, 4
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul4100:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: slli a1, a0, 12
 ; RV32ZBA-NEXT: sh2add a0, a0, a1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 4100
 ret i32 %c
@@ -534,12 +596,14 @@
 ; RV32I-NEXT: lui a1, 1
 ; RV32I-NEXT: addi a1, a1, 8
 ; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: mul4104:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: slli a1, a0, 12
 ; RV32ZBA-NEXT: sh3add a0, a0, a1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = mul i32 %a, 4104
 ret i32 %c
@@ -551,12 +615,14 @@
 ; RV32I-NEXT: lui a1, 1
 ; RV32I-NEXT: addi a1, a1, 8
 ; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: add4104:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: addi a1, zero, 1026
 ; RV32ZBA-NEXT: sh2add a0, a1, a0
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = add i32 %a, 4104
 ret i32 %c
@@ -568,12 +634,14 @@
 ; RV32I-NEXT: lui a1, 2
 ; RV32I-NEXT: addi a1, a1, 16
 ; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: add8208:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: addi a1, zero, 1026
 ; RV32ZBA-NEXT: sh3add a0, a1, a0
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = add i32 %a, 8208
 ret i32 %c
@@ -585,12 +653,14 @@
 ; RV32I-NEXT: slli a0, a0, 5
 ; RV32I-NEXT: slli a1, a1, 6
 ; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: addshl_5_6:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh1add a0, a1, a0
 ; RV32ZBA-NEXT: slli a0, a0, 5
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = shl i32 %a, 5
 %d = shl i32 %b, 6
@@ -604,12 +674,14 @@
 ; RV32I-NEXT: slli a0, a0, 5
 ; RV32I-NEXT: slli a1, a1, 7
 ; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: addshl_5_7:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh2add a0, a1, a0
 ; RV32ZBA-NEXT: slli a0, a0, 5
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = shl i32 %a, 5
 %d = shl i32 %b, 7
@@ -623,12 +695,14 @@
 ; RV32I-NEXT: slli a0, a0, 5
 ; RV32I-NEXT: slli a1, a1, 8
 ; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: .cfi_def_cfa_offset 0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBA-LABEL: addshl_5_8:
 ; RV32ZBA: # %bb.0:
 ; RV32ZBA-NEXT: sh3add a0, a1, a0
 ; RV32ZBA-NEXT: slli a0, a0, 5
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 %c = shl i32 %a, 5
 %d = shl i32 %b, 8