diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -541,6 +541,7 @@
   const RISCVRegisterInfo *RI = STI.getRegisterInfo();
   MachineFrameInfo &MFI = MF.getFrameInfo();
   auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+  const RISCVInstrInfo *TII = STI.getInstrInfo();
   Register FPReg = getFPReg(STI);
   Register SPReg = getSPReg(STI);
@@ -606,6 +607,50 @@
     adjustReg(MBB, LastFrameDestroy, DL, SPReg, SPReg, SecondSPAdjustAmount,
               MachineInstr::FrameDestroy);
+
+    // Emit ".cfi_def_cfa_offset FirstSPAdjustAmount" if using an sp-based CFA.
+    if (!hasFP(MF)) {
+      unsigned CFIIndex = MF.addFrameInst(
+          MCCFIInstruction::cfiDefCfaOffset(nullptr, -FirstSPAdjustAmount));
+      BuildMI(MBB, LastFrameDestroy, DL,
+              TII->get(TargetOpcode::CFI_INSTRUCTION))
+          .addCFIIndex(CFIIndex);
+    }
+  }
+
+  if (hasFP(MF)) {
+    // Find the instruction that restores FP from the stack.
+    for (auto &I = LastFrameDestroy; I != MBBI; ++I) {
+      if (I->mayLoad() && I->getOperand(0).isReg()) {
+        Register DestReg = I->getOperand(0).getReg();
+        if (DestReg == FPReg) {
+          // If there is a frame pointer, then after restoring $fp we need to
+          // adjust the CFA back to the correct sp-based offset.
+          // Emit ".cfi_def_cfa $sp, CFAOffset"
+          uint64_t CFAOffset =
+              FirstSPAdjustAmount
+                  ? FirstSPAdjustAmount + RVFI->getVarArgsSaveSize()
+                  : FPOffset;
+          unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
+              nullptr, RI->getDwarfRegNum(SPReg, true), CFAOffset));
+          BuildMI(MBB, std::next(I), DL,
+                  TII->get(TargetOpcode::CFI_INSTRUCTION))
+              .addCFIIndex(CFIIndex);
+          break;
+        }
+      }
+    }
+  }
+
+  // Add CFI directives for callee-saved registers.
+  // Iterate over the list of callee-saved registers and emit a .cfi_restore
+  // directive for each of them.
+  for (const auto &Entry : CSI) {
+    Register Reg = Entry.getReg();
+    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createRestore(
+        nullptr, RI->getDwarfRegNum(Reg, true)));
+    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+        .addCFIIndex(CFIIndex);
   }
 
   if (FirstSPAdjustAmount)
@@ -616,6 +661,13 @@
   // Emit epilogue for shadow call stack.
emitSCSEpilogue(MF, MBB, MBBI, DL); + + // After restoring $sp, we need to adjust CFA to $(sp + 0) + // Emit ".cfi_def_cfa_offset 0" + unsigned CFIIndex = + MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, 0)); + BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); } StackOffset diff --git a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll --- a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll +++ b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll @@ -13,6 +13,7 @@ ; RV32IMB-NEXT: addi a1, zero, 29 ; RV32IMB-NEXT: mul a0, a0, a1 ; RV32IMB-NEXT: addi a0, a0, 1073 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_accept_a1: @@ -20,6 +21,7 @@ ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 ; RV64IMB-NEXT: addiw a0, a0, 1073 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 37 %tmp1 = mul i32 %tmp0, 29 @@ -32,6 +34,7 @@ ; RV32IMB-NEXT: addi a1, zero, 29 ; RV32IMB-NEXT: mul a0, a0, a1 ; RV32IMB-NEXT: addi a0, a0, 1073 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_accept_a2: @@ -39,6 +42,7 @@ ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 ; RV64IMB-NEXT: addiw a0, a0, 1073 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 37 %tmp1 = mul i32 %tmp0, 29 @@ -56,6 +60,7 @@ ; RV32IMB-NEXT: addi a0, a2, 1073 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_accept_a3: @@ -63,6 +68,7 @@ ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mul a0, a0, a1 ; RV64IMB-NEXT: addi a0, a0, 1073 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i64 %x, 37 %tmp1 = mul i64 %tmp0, 29 @@ -77,6 +83,7 @@ ; RV32IMB-NEXT: lui a1, 50 ; RV32IMB-NEXT: addi a1, a1, 1119 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_accept_b1: @@ -86,6 +93,7 @@ ; RV64IMB-NEXT: lui a1, 50 ; RV64IMB-NEXT: addiw a1, a1, 1119 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 8953 %tmp1 = mul i32 %tmp0, 23 @@ -100,6 +108,7 @@ ; RV32IMB-NEXT: lui a1, 50 ; RV32IMB-NEXT: addi a1, a1, 1119 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_accept_b2: @@ -109,6 +118,7 @@ ; RV64IMB-NEXT: lui a1, 50 ; RV64IMB-NEXT: addiw a1, a1, 1119 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 8953 %tmp1 = mul i32 %tmp0, 23 @@ -128,6 +138,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_accept_b3: @@ -137,6 +148,7 @@ ; RV64IMB-NEXT: lui a1, 50 ; RV64IMB-NEXT: addiw a1, a1, 1119 ; RV64IMB-NEXT: add a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i64 %x, 8953 %tmp1 = mul i64 %tmp0, 23 @@ -149,6 +161,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1971 ; RV32IMB-NEXT: addi a1, zero, 29 ; RV32IMB-NEXT: mul a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_a1: @@ -156,6 +169,7 @@ ; RV64IMB-NEXT: addiw a0, a0, 1971 ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 +; RV64IMB-NEXT: 
.cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 1971 %tmp1 = mul i32 %tmp0, 29 @@ -168,6 +182,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1971 ; RV32IMB-NEXT: addi a1, zero, 29 ; RV32IMB-NEXT: mul a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_a2: @@ -175,6 +190,7 @@ ; RV64IMB-NEXT: addiw a0, a0, 1971 ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 1971 %tmp1 = mul i32 %tmp0, 29 @@ -194,6 +210,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_a3: @@ -201,6 +218,7 @@ ; RV64IMB-NEXT: addi a0, a0, 1971 ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mul a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i64 %x, 1971 %tmp1 = mul i64 %tmp0, 29 @@ -213,6 +231,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1000 ; RV32IMB-NEXT: sh3add a1, a0, a0 ; RV32IMB-NEXT: sh3add a0, a1, a0 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_c1: @@ -221,6 +240,7 @@ ; RV64IMB-NEXT: sh3add a1, a0, a0 ; RV64IMB-NEXT: sh3add a0, a1, a0 ; RV64IMB-NEXT: sext.w a0, a0 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 1000 %tmp1 = mul i32 %tmp0, 73 @@ -233,6 +253,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1000 ; RV32IMB-NEXT: sh3add a1, a0, a0 ; RV32IMB-NEXT: sh3add a0, a1, a0 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_c2: @@ -241,6 +262,7 @@ ; RV64IMB-NEXT: sh3add a1, a0, a0 ; RV64IMB-NEXT: sh3add a0, a1, a0 ; RV64IMB-NEXT: sext.w a0, a0 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 1000 %tmp1 = mul i32 %tmp0, 73 @@ -260,6 +282,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_c3: @@ -267,6 +290,7 @@ ; RV64IMB-NEXT: addi a0, a0, 1000 ; RV64IMB-NEXT: sh3add a1, a0, a0 ; RV64IMB-NEXT: sh3add a0, a1, a0 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i64 %x, 1000 %tmp1 = mul i64 %tmp0, 73 @@ -279,6 +303,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1000 ; RV32IMB-NEXT: sh1add a0, a0, a0 ; RV32IMB-NEXT: slli a0, a0, 6 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_d1: @@ -286,6 +311,7 @@ ; RV64IMB-NEXT: addi a0, a0, 1000 ; RV64IMB-NEXT: sh1add a0, a0, a0 ; RV64IMB-NEXT: slliw a0, a0, 6 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 1000 %tmp1 = mul i32 %tmp0, 192 @@ -298,6 +324,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1000 ; RV32IMB-NEXT: sh1add a0, a0, a0 ; RV32IMB-NEXT: slli a0, a0, 6 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_d2: @@ -305,6 +332,7 @@ ; RV64IMB-NEXT: addi a0, a0, 1000 ; RV64IMB-NEXT: sh1add a0, a0, a0 ; RV64IMB-NEXT: slliw a0, a0, 6 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 1000 %tmp1 = mul i32 %tmp0, 192 @@ -326,6 +354,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_d3: @@ -333,6 +362,7 @@ ; RV64IMB-NEXT: addi a0, a0, 1000 ; 
RV64IMB-NEXT: sh1add a0, a0, a0 ; RV64IMB-NEXT: slli a0, a0, 6 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i64 %x, 1000 %tmp1 = mul i64 %tmp0, 192 @@ -345,6 +375,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1971 ; RV32IMB-NEXT: addi a1, zero, 29 ; RV32IMB-NEXT: mul a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_e1: @@ -352,6 +383,7 @@ ; RV64IMB-NEXT: addiw a0, a0, 1971 ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 29 %tmp1 = add i32 %tmp0, 57159 @@ -364,6 +396,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1971 ; RV32IMB-NEXT: addi a1, zero, 29 ; RV32IMB-NEXT: mul a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_e2: @@ -371,6 +404,7 @@ ; RV64IMB-NEXT: addiw a0, a0, 1971 ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 29 %tmp1 = add i32 %tmp0, 57159 @@ -390,6 +424,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_e3: @@ -397,6 +432,7 @@ ; RV64IMB-NEXT: addi a0, a0, 1971 ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mul a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i64 %x, 29 %tmp1 = add i64 %tmp0, 57159 @@ -410,6 +446,7 @@ ; RV32IMB-NEXT: addi a1, zero, 29 ; RV32IMB-NEXT: mul a0, a0, a1 ; RV32IMB-NEXT: addi a0, a0, 11 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_f1: @@ -418,6 +455,7 @@ ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 ; RV64IMB-NEXT: addiw a0, a0, 11 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 29 %tmp1 = add i32 %tmp0, 57199 @@ -431,6 +469,7 @@ ; RV32IMB-NEXT: addi a1, zero, 29 ; RV32IMB-NEXT: mul a0, a0, a1 ; RV32IMB-NEXT: addi a0, a0, 11 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_f2: @@ -439,6 +478,7 @@ ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 ; RV64IMB-NEXT: addiw a0, a0, 11 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 29 %tmp1 = add i32 %tmp0, 57199 @@ -458,6 +498,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_f3: @@ -466,6 +507,7 @@ ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mul a0, a0, a1 ; RV64IMB-NEXT: addi a0, a0, 11 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i64 %x, 29 %tmp1 = add i64 %tmp0, 57199 @@ -479,6 +521,7 @@ ; RV32IMB-NEXT: sh3add a1, a0, a0 ; RV32IMB-NEXT: sh3add a0, a1, a0 ; RV32IMB-NEXT: addi a0, a0, 10 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_g1: @@ -487,6 +530,7 @@ ; RV64IMB-NEXT: sh3add a1, a0, a0 ; RV64IMB-NEXT: sh3add a0, a1, a0 ; RV64IMB-NEXT: addiw a0, a0, 10 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 73 %tmp1 = add i32 %tmp0, 7310 @@ -500,6 +544,7 @@ ; RV32IMB-NEXT: sh3add a1, a0, a0 ; RV32IMB-NEXT: sh3add a0, a1, a0 ; RV32IMB-NEXT: addi a0, a0, 10 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: 
add_mul_combine_reject_g2: @@ -508,6 +553,7 @@ ; RV64IMB-NEXT: sh3add a1, a0, a0 ; RV64IMB-NEXT: sh3add a0, a1, a0 ; RV64IMB-NEXT: addiw a0, a0, 10 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 73 %tmp1 = add i32 %tmp0, 7310 @@ -527,6 +573,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_g3: @@ -535,6 +582,7 @@ ; RV64IMB-NEXT: sh3add a1, a0, a0 ; RV64IMB-NEXT: sh3add a0, a1, a0 ; RV64IMB-NEXT: addi a0, a0, 10 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i64 %x, 73 %tmp1 = add i64 %tmp0, 7310 @@ -555,6 +603,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1024 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_infinite_loop: @@ -563,6 +612,7 @@ ; RV64IMB-NEXT: lui a1, 1 ; RV64IMB-NEXT: addiw a1, a1, -2048 ; RV64IMB-NEXT: sh3add a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i64 %x, 24 %tmp1 = add i64 %tmp0, 2048 @@ -578,6 +628,7 @@ ; RV32IMB-NEXT: lui a1, 2 ; RV32IMB-NEXT: addi a1, a1, 798 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mul3000_add8990_a: @@ -588,6 +639,7 @@ ; RV64IMB-NEXT: lui a1, 2 ; RV64IMB-NEXT: addiw a1, a1, 798 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 3000 %tmp1 = add i32 %tmp0, 8990 @@ -603,6 +655,7 @@ ; RV32IMB-NEXT: lui a1, 2 ; RV32IMB-NEXT: addi a1, a1, 798 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mul3000_add8990_b: @@ -613,6 +666,7 @@ ; RV64IMB-NEXT: lui a1, 2 ; RV64IMB-NEXT: addiw a1, a1, 798 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 3000 %tmp1 = add i32 %tmp0, 8990 @@ -633,6 +687,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mul3000_add8990_c: @@ -643,6 +698,7 @@ ; RV64IMB-NEXT: lui a1, 2 ; RV64IMB-NEXT: addiw a1, a1, 798 ; RV64IMB-NEXT: add a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i64 %x, 3000 %tmp1 = add i64 %tmp0, 8990 @@ -658,6 +714,7 @@ ; RV32IMB-NEXT: lui a1, 1048574 ; RV32IMB-NEXT: addi a1, a1, -798 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mul3000_sub8990_a: @@ -668,6 +725,7 @@ ; RV64IMB-NEXT: lui a1, 1048574 ; RV64IMB-NEXT: addiw a1, a1, -798 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 3000 %tmp1 = add i32 %tmp0, -8990 @@ -683,6 +741,7 @@ ; RV32IMB-NEXT: lui a1, 1048574 ; RV32IMB-NEXT: addi a1, a1, -798 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mul3000_sub8990_b: @@ -693,6 +752,7 @@ ; RV64IMB-NEXT: lui a1, 1048574 ; RV64IMB-NEXT: addiw a1, a1, -798 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 3000 %tmp1 = add i32 %tmp0, -8990 @@ -714,6 +774,7 @@ ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 ; RV32IMB-NEXT: addi a1, a1, -1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: 
mul3000_sub8990_c: @@ -724,6 +785,7 @@ ; RV64IMB-NEXT: lui a1, 1048574 ; RV64IMB-NEXT: addiw a1, a1, -798 ; RV64IMB-NEXT: add a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i64 %x, 3000 %tmp1 = add i64 %tmp0, -8990 @@ -739,6 +801,7 @@ ; RV32IMB-NEXT: lui a1, 2 ; RV32IMB-NEXT: addi a1, a1, 798 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mulneg3000_add8990_a: @@ -749,6 +812,7 @@ ; RV64IMB-NEXT: lui a1, 2 ; RV64IMB-NEXT: addiw a1, a1, 798 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, -3000 %tmp1 = add i32 %tmp0, 8990 @@ -764,6 +828,7 @@ ; RV32IMB-NEXT: lui a1, 2 ; RV32IMB-NEXT: addi a1, a1, 798 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mulneg3000_add8990_b: @@ -774,6 +839,7 @@ ; RV64IMB-NEXT: lui a1, 2 ; RV64IMB-NEXT: addiw a1, a1, 798 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, -3000 %tmp1 = add i32 %tmp0, 8990 @@ -795,6 +861,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mulneg3000_add8990_c: @@ -805,6 +872,7 @@ ; RV64IMB-NEXT: lui a1, 2 ; RV64IMB-NEXT: addiw a1, a1, 798 ; RV64IMB-NEXT: add a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i64 %x, -3000 %tmp1 = add i64 %tmp0, 8990 @@ -820,6 +888,7 @@ ; RV32IMB-NEXT: lui a1, 1048574 ; RV32IMB-NEXT: addi a1, a1, -798 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mulneg3000_sub8990_a: @@ -830,6 +899,7 @@ ; RV64IMB-NEXT: lui a1, 1048574 ; RV64IMB-NEXT: addiw a1, a1, -798 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, -3000 %tmp1 = add i32 %tmp0, -8990 @@ -845,6 +915,7 @@ ; RV32IMB-NEXT: lui a1, 1048574 ; RV32IMB-NEXT: addi a1, a1, -798 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mulneg3000_sub8990_b: @@ -855,6 +926,7 @@ ; RV64IMB-NEXT: lui a1, 1048574 ; RV64IMB-NEXT: addiw a1, a1, -798 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, -3000 %tmp1 = add i32 %tmp0, -8990 @@ -877,6 +949,7 @@ ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 ; RV32IMB-NEXT: addi a1, a1, -1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mulneg3000_sub8990_c: @@ -887,6 +960,7 @@ ; RV64IMB-NEXT: lui a1, 1048574 ; RV64IMB-NEXT: addiw a1, a1, -798 ; RV64IMB-NEXT: add a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i64 %x, -3000 %tmp1 = add i64 %tmp0, -8990 diff --git a/llvm/test/CodeGen/RISCV/addrspacecast.ll b/llvm/test/CodeGen/RISCV/addrspacecast.ll --- a/llvm/test/CodeGen/RISCV/addrspacecast.ll +++ b/llvm/test/CodeGen/RISCV/addrspacecast.ll @@ -8,11 +8,13 @@ ; RV32I-LABEL: cast0: ; RV32I: # %bb.0: ; RV32I-NEXT: sw zero, 0(a0) +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: cast0: ; RV64I: # %bb.0: ; RV64I-NEXT: sw zero, 0(a0) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %ptr0 = addrspacecast i32 addrspace(1)* %ptr to i32 addrspace(0)* store i32 0, i32* %ptr0 @@ -28,7 +30,9 @@ ; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: call foo@plt ; RV32I-NEXT: lw ra, 
12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: cast1: @@ -39,7 +43,9 @@ ; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: call foo@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %castptr = addrspacecast i32* %ptr to i32 addrspace(10)* call void @foo(i32 addrspace(10)* %castptr) diff --git a/llvm/test/CodeGen/RISCV/aext-to-sext.ll b/llvm/test/CodeGen/RISCV/aext-to-sext.ll --- a/llvm/test/CodeGen/RISCV/aext-to-sext.ll +++ b/llvm/test/CodeGen/RISCV/aext-to-sext.ll @@ -62,6 +62,7 @@ ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64I-NEXT: beq a0, a1, .LBB1_1 ; RV64I-NEXT: # %bb.2: # %bar +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret br label %bb diff --git a/llvm/test/CodeGen/RISCV/alu32.ll b/llvm/test/CodeGen/RISCV/alu32.ll --- a/llvm/test/CodeGen/RISCV/alu32.ll +++ b/llvm/test/CodeGen/RISCV/alu32.ll @@ -133,12 +133,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: srli a0, a0, 3 ; RV32I-NEXT: ori a0, a0, 1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: srli_demandedbits: ; RV64I: # %bb.0: ; RV64I-NEXT: srliw a0, a0, 3 ; RV64I-NEXT: ori a0, a0, 1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %2 = lshr i32 %0, 3 %3 = or i32 %2, 1 diff --git a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll --- a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll @@ -1286,6 +1286,7 @@ ; RV32I-NEXT: srli a1, a0, 1 ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: andi a0, a0, 1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test_parity_i32: @@ -1303,6 +1304,7 @@ ; RV64I-NEXT: srli a1, a0, 1 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: andi a0, a0, 1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = call i32 @llvm.ctpop.i32(i32 %a) %2 = and i32 %1, 1 @@ -1325,6 +1327,7 @@ ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test_parity_i64: @@ -1342,6 +1345,7 @@ ; RV64I-NEXT: srli a1, a0, 1 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: andi a0, a0, 1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = call i64 @llvm.ctpop.i64(i64 %a) %2 = and i64 %1, 1 diff --git a/llvm/test/CodeGen/RISCV/calling-conv-vector-float.ll b/llvm/test/CodeGen/RISCV/calling-conv-vector-float.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-vector-float.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-vector-float.ll @@ -15,12 +15,14 @@ ; RV64-NEXT: fadd.s ft0, ft1, ft0 ; RV64-NEXT: fmv.x.w a0, ft0 ; RV64-NEXT: fmv.x.w a1, ft2 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV64LP64F-LABEL: callee_v2f32: ; RV64LP64F: # %bb.0: ; RV64LP64F-NEXT: fadd.s fa0, fa0, fa2 ; RV64LP64F-NEXT: fadd.s fa1, fa1, fa3 +; RV64LP64F-NEXT: .cfi_def_cfa_offset 0 ; RV64LP64F-NEXT: ret %z = fadd <2 x float> %x, %y ret <2 x float> %z @@ -45,6 +47,7 @@ ; RV64-NEXT: fsw ft1, 8(a0) ; RV64-NEXT: fsw ft3, 4(a0) ; RV64-NEXT: fsw ft5, 0(a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV64LP64F-LABEL: callee_v4f32: @@ -57,6 +60,7 @@ ; RV64LP64F-NEXT: fsw ft2, 8(a0) ; RV64LP64F-NEXT: fsw ft1, 4(a0) ; RV64LP64F-NEXT: fsw ft0, 0(a0) +; RV64LP64F-NEXT: .cfi_def_cfa_offset 0 ; RV64LP64F-NEXT: ret %z = fadd <4 x float> %x, %y ret <4 x float> %z diff --git 
a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll --- a/llvm/test/CodeGen/RISCV/double-convert.ll +++ b/llvm/test/CodeGen/RISCV/double-convert.ll @@ -144,6 +144,7 @@ ; RV32IFD-NEXT: mv a0, a1 ; RV32IFD-NEXT: .LBB5_2: ; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: .cfi_def_cfa_offset 0 ; RV32IFD-NEXT: ret ; ; RV64IFD-LABEL: fcvt_wu_d_multiple_use: @@ -155,6 +156,7 @@ ; RV64IFD-NEXT: # %bb.1: ; RV64IFD-NEXT: mv a0, a1 ; RV64IFD-NEXT: .LBB5_2: +; RV64IFD-NEXT: .cfi_def_cfa_offset 0 ; RV64IFD-NEXT: ret %a = fptoui double %x to i32 %b = icmp eq i32 %a, 0 @@ -639,6 +641,7 @@ ; RV32IFD-NEXT: addi a0, a0, 1 ; RV32IFD-NEXT: fcvt.d.w ft0, a0 ; RV32IFD-NEXT: fsd ft0, 0(a1) +; RV32IFD-NEXT: .cfi_def_cfa_offset 0 ; RV32IFD-NEXT: ret ; ; RV64IFD-LABEL: fcvt_d_w_demanded_bits: @@ -646,6 +649,7 @@ ; RV64IFD-NEXT: addiw a0, a0, 1 ; RV64IFD-NEXT: fcvt.d.w ft0, a0 ; RV64IFD-NEXT: fsd ft0, 0(a1) +; RV64IFD-NEXT: .cfi_def_cfa_offset 0 ; RV64IFD-NEXT: ret %3 = add i32 %0, 1 %4 = sitofp i32 %3 to double @@ -660,6 +664,7 @@ ; RV32IFD-NEXT: addi a0, a0, 1 ; RV32IFD-NEXT: fcvt.d.wu ft0, a0 ; RV32IFD-NEXT: fsd ft0, 0(a1) +; RV32IFD-NEXT: .cfi_def_cfa_offset 0 ; RV32IFD-NEXT: ret ; ; RV64IFD-LABEL: fcvt_d_wu_demanded_bits: @@ -667,6 +672,7 @@ ; RV64IFD-NEXT: addiw a0, a0, 1 ; RV64IFD-NEXT: fcvt.d.wu ft0, a0 ; RV64IFD-NEXT: fsd ft0, 0(a1) +; RV64IFD-NEXT: .cfi_def_cfa_offset 0 ; RV64IFD-NEXT: ret %3 = add i32 %0, 1 %4 = uitofp i32 %3 to double diff --git a/llvm/test/CodeGen/RISCV/exception-pointer-register.ll b/llvm/test/CodeGen/RISCV/exception-pointer-register.ll --- a/llvm/test/CodeGen/RISCV/exception-pointer-register.ll +++ b/llvm/test/CodeGen/RISCV/exception-pointer-register.ll @@ -40,7 +40,11 @@ ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 +; RV32I-NEXT: .cfi_restore s1 ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB0_4: # %lpad ; RV32I-NEXT: .Ltmp4: @@ -77,7 +81,11 @@ ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 +; RV64I-NEXT: .cfi_restore s1 ; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB0_4: # %lpad ; RV64I-NEXT: .Ltmp4: @@ -111,10 +119,12 @@ define internal void @callee(i1* %p) { ; RV32I-LABEL: callee: ; RV32I: # %bb.0: +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: callee: ; RV64I: # %bb.0: +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ret void } diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll --- a/llvm/test/CodeGen/RISCV/float-convert.ll +++ b/llvm/test/CodeGen/RISCV/float-convert.ll @@ -82,6 +82,7 @@ ; RV32IF-NEXT: # %bb.1: ; RV32IF-NEXT: mv a0, a1 ; RV32IF-NEXT: .LBB3_2: +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fcvt_wu_s_multiple_use: @@ -93,6 +94,7 @@ ; RV64IF-NEXT: # %bb.1: ; RV64IF-NEXT: mv a0, a1 ; RV64IF-NEXT: .LBB3_2: +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret %a = fptoui float %x to i32 %b = icmp eq i32 %a, 0 @@ -526,6 +528,7 @@ ; RV32IF-NEXT: addi a0, a0, 1 ; RV32IF-NEXT: fcvt.s.w ft0, a0 ; RV32IF-NEXT: fsw ft0, 0(a1) +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; 
RV64IF-LABEL: fcvt_s_w_demanded_bits: @@ -533,6 +536,7 @@ ; RV64IF-NEXT: addiw a0, a0, 1 ; RV64IF-NEXT: fcvt.s.w ft0, a0 ; RV64IF-NEXT: fsw ft0, 0(a1) +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret %3 = add i32 %0, 1 %4 = sitofp i32 %3 to float @@ -547,6 +551,7 @@ ; RV32IF-NEXT: addi a0, a0, 1 ; RV32IF-NEXT: fcvt.s.wu ft0, a0 ; RV32IF-NEXT: fsw ft0, 0(a1) +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fcvt_s_wu_demanded_bits: @@ -554,6 +559,7 @@ ; RV64IF-NEXT: addiw a0, a0, 1 ; RV64IF-NEXT: fcvt.s.wu ft0, a0 ; RV64IF-NEXT: fsw ft0, 0(a1) +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret %3 = add i32 %0, 1 %4 = uitofp i32 %3 to float diff --git a/llvm/test/CodeGen/RISCV/fpenv.ll b/llvm/test/CodeGen/RISCV/fpenv.ll --- a/llvm/test/CodeGen/RISCV/fpenv.ll +++ b/llvm/test/CodeGen/RISCV/fpenv.ll @@ -11,6 +11,7 @@ ; RV32IF-NEXT: addi a1, a1, 769 ; RV32IF-NEXT: srl a0, a1, a0 ; RV32IF-NEXT: andi a0, a0, 7 +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: func_01: @@ -21,6 +22,7 @@ ; RV64IF-NEXT: addiw a1, a1, 769 ; RV64IF-NEXT: srl a0, a1, a0 ; RV64IF-NEXT: andi a0, a0, 7 +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret %rm = call i32 @llvm.flt.rounds() ret i32 %rm @@ -35,6 +37,7 @@ ; RV32IF-NEXT: srl a0, a1, a0 ; RV32IF-NEXT: andi a0, a0, 7 ; RV32IF-NEXT: fsrm a0 +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: func_02: @@ -46,6 +49,7 @@ ; RV64IF-NEXT: srl a0, a1, a0 ; RV64IF-NEXT: andi a0, a0, 7 ; RV64IF-NEXT: fsrm a0 +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret call void @llvm.set.rounding(i32 %rm) ret void @@ -55,11 +59,13 @@ ; RV32IF-LABEL: func_03: ; RV32IF: # %bb.0: ; RV32IF-NEXT: fsrmi 1 +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: func_03: ; RV64IF: # %bb.0: ; RV64IF-NEXT: fsrmi 1 +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret call void @llvm.set.rounding(i32 0) ret void @@ -69,11 +75,13 @@ ; RV32IF-LABEL: func_04: ; RV32IF: # %bb.0: ; RV32IF-NEXT: fsrmi 0 +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: func_04: ; RV64IF: # %bb.0: ; RV64IF-NEXT: fsrmi 0 +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret call void @llvm.set.rounding(i32 1) ret void @@ -83,11 +91,13 @@ ; RV32IF-LABEL: func_05: ; RV32IF: # %bb.0: ; RV32IF-NEXT: fsrmi 3 +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: func_05: ; RV64IF: # %bb.0: ; RV64IF-NEXT: fsrmi 3 +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret call void @llvm.set.rounding(i32 2) ret void @@ -97,11 +107,13 @@ ; RV32IF-LABEL: func_06: ; RV32IF: # %bb.0: ; RV32IF-NEXT: fsrmi 2 +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: func_06: ; RV64IF: # %bb.0: ; RV64IF-NEXT: fsrmi 2 +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret call void @llvm.set.rounding(i32 3) ret void @@ -111,11 +123,13 @@ ; RV32IF-LABEL: func_07: ; RV32IF: # %bb.0: ; RV32IF-NEXT: fsrmi 4 +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: func_07: ; RV64IF: # %bb.0: ; RV64IF-NEXT: fsrmi 4 +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret call void @llvm.set.rounding(i32 4) ret void diff --git a/llvm/test/CodeGen/RISCV/frame-info.ll b/llvm/test/CodeGen/RISCV/frame-info.ll --- a/llvm/test/CodeGen/RISCV/frame-info.ll +++ b/llvm/test/CodeGen/RISCV/frame-info.ll @@ -11,10 +11,12 @@ define void @trivial() { ; RV32-LABEL: trivial: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: 
trivial: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32-WITHFP-LABEL: trivial: @@ -28,12 +30,13 @@ ; RV32-WITHFP-NEXT: addi s0, sp, 16 ; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 0 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32-WITHFP-NEXT: .cfi_def_cfa sp, 16 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-WITHFP-NEXT: .cfi_restore ra +; RV32-WITHFP-NEXT: .cfi_restore s0 ; RV32-WITHFP-NEXT: addi sp, sp, 16 +; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; RV32-WITHFP-NEXT: ret -; -; RV64-WITHFP-LABEL: trivial: -; RV64-WITHFP: # %bb.0: ; RV64-WITHFP-NEXT: addi sp, sp, -16 ; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 16 ; RV64-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill @@ -63,12 +66,16 @@ ; RV32-NEXT: addi a0, a0, 15 ; RV32-NEXT: andi a0, a0, -16 ; RV32-NEXT: sub a0, sp, a0 -; RV32-NEXT: mv sp, a0 +; RV32-NEXT: mv sp, a0 ; RV32-NEXT: call callee_with_args@plt ; RV32-NEXT: addi sp, s0, -16 ; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra +; RV32-NEXT: .cfi_restore s0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: stack_alloc: @@ -90,8 +97,12 @@ ; RV64-NEXT: call callee_with_args@plt ; RV64-NEXT: addi sp, s0, -16 ; RV64-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_restore ra +; RV64-NEXT: .cfi_restore s0 ; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32-WITHFP-LABEL: stack_alloc: @@ -111,8 +122,12 @@ ; RV32-WITHFP-NEXT: call callee_with_args@plt ; RV32-WITHFP-NEXT: addi sp, s0, -16 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32-WITHFP-NEXT: .cfi_def_cfa sp, 16 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-WITHFP-NEXT: .cfi_restore ra +; RV32-WITHFP-NEXT: .cfi_restore s0 ; RV32-WITHFP-NEXT: addi sp, sp, 16 +; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; RV32-WITHFP-NEXT: ret ; ; RV64-WITHFP-LABEL: stack_alloc: @@ -157,7 +172,9 @@ ; RV32-NEXT: .cfi_offset ra, -4 ; RV32-NEXT: call callee2@plt ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: branch_and_tail_call: @@ -173,7 +190,9 @@ ; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: call callee2@plt ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_restore ra ; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32-WITHFP-LABEL: branch_and_tail_call: @@ -193,8 +212,12 @@ ; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 0 ; RV32-WITHFP-NEXT: call callee2@plt ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32-WITHFP-NEXT: .cfi_def_cfa sp, 16 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-WITHFP-NEXT: .cfi_restore ra +; RV32-WITHFP-NEXT: .cfi_restore s0 ; RV32-WITHFP-NEXT: addi sp, sp, 16 +; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; RV32-WITHFP-NEXT: ret ; ; RV64-WITHFP-LABEL: branch_and_tail_call: diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll --- a/llvm/test/CodeGen/RISCV/half-convert.ll +++ b/llvm/test/CodeGen/RISCV/half-convert.ll @@ -146,6 +146,7 @@ ; RV32IZFH-NEXT: # %bb.1: ; RV32IZFH-NEXT: mv a0, a1 ; RV32IZFH-NEXT: .LBB3_2: +; RV32IZFH-NEXT: .cfi_def_cfa_offset 0 ; RV32IZFH-NEXT: ret ; ; RV32IDZFH-LABEL: 
fcvt_ui_h_multiple_use: @@ -156,6 +157,7 @@ ; RV32IDZFH-NEXT: # %bb.1: ; RV32IDZFH-NEXT: mv a0, a1 ; RV32IDZFH-NEXT: .LBB3_2: +; RV32IDZFH-NEXT: .cfi_def_cfa_offset 0 ; RV32IDZFH-NEXT: ret ; ; RV64IZFH-LABEL: fcvt_ui_h_multiple_use: @@ -166,6 +168,7 @@ ; RV64IZFH-NEXT: # %bb.1: ; RV64IZFH-NEXT: mv a0, a1 ; RV64IZFH-NEXT: .LBB3_2: +; RV64IZFH-NEXT: .cfi_def_cfa_offset 0 ; RV64IZFH-NEXT: ret ; ; RV64IDZFH-LABEL: fcvt_ui_h_multiple_use: @@ -176,6 +179,7 @@ ; RV64IDZFH-NEXT: # %bb.1: ; RV64IDZFH-NEXT: mv a0, a1 ; RV64IDZFH-NEXT: .LBB3_2: +; RV64IDZFH-NEXT: .cfi_def_cfa_offset 0 ; RV64IDZFH-NEXT: ret %a = fptoui half %x to i32 %b = icmp eq i32 %a, 0 @@ -1154,6 +1158,7 @@ ; RV32IZFH-NEXT: addi a0, a0, 1 ; RV32IZFH-NEXT: fcvt.h.w ft0, a0 ; RV32IZFH-NEXT: fsh ft0, 0(a1) +; RV32IZFH-NEXT: .cfi_def_cfa_offset 0 ; RV32IZFH-NEXT: ret ; ; RV32IDZFH-LABEL: fcvt_h_w_demanded_bits: @@ -1161,6 +1166,7 @@ ; RV32IDZFH-NEXT: addi a0, a0, 1 ; RV32IDZFH-NEXT: fcvt.h.w ft0, a0 ; RV32IDZFH-NEXT: fsh ft0, 0(a1) +; RV32IDZFH-NEXT: .cfi_def_cfa_offset 0 ; RV32IDZFH-NEXT: ret ; ; RV64IZFH-LABEL: fcvt_h_w_demanded_bits: @@ -1168,6 +1174,7 @@ ; RV64IZFH-NEXT: addiw a0, a0, 1 ; RV64IZFH-NEXT: fcvt.h.w ft0, a0 ; RV64IZFH-NEXT: fsh ft0, 0(a1) +; RV64IZFH-NEXT: .cfi_def_cfa_offset 0 ; RV64IZFH-NEXT: ret ; ; RV64IDZFH-LABEL: fcvt_h_w_demanded_bits: @@ -1175,6 +1182,7 @@ ; RV64IDZFH-NEXT: addiw a0, a0, 1 ; RV64IDZFH-NEXT: fcvt.h.w ft0, a0 ; RV64IDZFH-NEXT: fsh ft0, 0(a1) +; RV64IDZFH-NEXT: .cfi_def_cfa_offset 0 ; RV64IDZFH-NEXT: ret %3 = add i32 %0, 1 %4 = sitofp i32 %3 to half @@ -1189,6 +1197,7 @@ ; RV32IZFH-NEXT: addi a0, a0, 1 ; RV32IZFH-NEXT: fcvt.h.wu ft0, a0 ; RV32IZFH-NEXT: fsh ft0, 0(a1) +; RV32IZFH-NEXT: .cfi_def_cfa_offset 0 ; RV32IZFH-NEXT: ret ; ; RV32IDZFH-LABEL: fcvt_h_wu_demanded_bits: @@ -1196,6 +1205,7 @@ ; RV32IDZFH-NEXT: addi a0, a0, 1 ; RV32IDZFH-NEXT: fcvt.h.wu ft0, a0 ; RV32IDZFH-NEXT: fsh ft0, 0(a1) +; RV32IDZFH-NEXT: .cfi_def_cfa_offset 0 ; RV32IDZFH-NEXT: ret ; ; RV64IZFH-LABEL: fcvt_h_wu_demanded_bits: @@ -1203,6 +1213,7 @@ ; RV64IZFH-NEXT: addiw a0, a0, 1 ; RV64IZFH-NEXT: fcvt.h.wu ft0, a0 ; RV64IZFH-NEXT: fsh ft0, 0(a1) +; RV64IZFH-NEXT: .cfi_def_cfa_offset 0 ; RV64IZFH-NEXT: ret ; ; RV64IDZFH-LABEL: fcvt_h_wu_demanded_bits: @@ -1210,6 +1221,7 @@ ; RV64IDZFH-NEXT: addiw a0, a0, 1 ; RV64IDZFH-NEXT: fcvt.h.wu ft0, a0 ; RV64IDZFH-NEXT: fsh ft0, 0(a1) +; RV64IDZFH-NEXT: .cfi_def_cfa_offset 0 ; RV64IDZFH-NEXT: ret %3 = add i32 %0, 1 %4 = uitofp i32 %3 to half diff --git a/llvm/test/CodeGen/RISCV/inline-asm-S-constraint.ll b/llvm/test/CodeGen/RISCV/inline-asm-S-constraint.ll --- a/llvm/test/CodeGen/RISCV/inline-asm-S-constraint.ll +++ b/llvm/test/CodeGen/RISCV/inline-asm-S-constraint.ll @@ -11,6 +11,7 @@ ; RV32-NEXT: lui a0, %hi(var) ; RV32-NEXT: addi a0, a0, %lo(var) ; RV32-NEXT: #NO_APP +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: constraint_S: @@ -19,6 +20,7 @@ ; RV64-NEXT: lui a0, %hi(var) ; RV64-NEXT: addi a0, a0, %lo(var) ; RV64-NEXT: #NO_APP +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ret = tail call i8* asm "lui $0, %hi($1)\0Aaddi $0, $0, %lo($1)", "=r,S"(i32* nonnull @var) ret i8* %ret @@ -34,6 +36,7 @@ ; RV32-NEXT: lui a0, %hi(.Ltmp0) ; RV32-NEXT: addi a0, a0, %lo(.Ltmp0) ; RV32-NEXT: #NO_APP +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: constraint_S_label: @@ -44,6 +47,7 @@ ; RV64-NEXT: lui a0, %hi(.Ltmp0) ; RV64-NEXT: addi a0, a0, %lo(.Ltmp0) ; RV64-NEXT: #NO_APP +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret entry: br label %L1 
diff --git a/llvm/test/CodeGen/RISCV/large-stack.ll b/llvm/test/CodeGen/RISCV/large-stack.ll --- a/llvm/test/CodeGen/RISCV/large-stack.ll +++ b/llvm/test/CodeGen/RISCV/large-stack.ll @@ -16,6 +16,7 @@ ; RV32I-FPELIM-NEXT: lui a0, 74565 ; RV32I-FPELIM-NEXT: addi a0, a0, 1664 ; RV32I-FPELIM-NEXT: add sp, sp, a0 +; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset 0 ; RV32I-FPELIM-NEXT: ret ; ; RV32I-WITHFP-LABEL: test: @@ -35,8 +36,12 @@ ; RV32I-WITHFP-NEXT: addi a0, a0, -352 ; RV32I-WITHFP-NEXT: add sp, sp, a0 ; RV32I-WITHFP-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload +; RV32I-WITHFP-NEXT: .cfi_def_cfa sp, 2032 ; RV32I-WITHFP-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload +; RV32I-WITHFP-NEXT: .cfi_restore ra +; RV32I-WITHFP-NEXT: .cfi_restore s0 ; RV32I-WITHFP-NEXT: addi sp, sp, 2032 +; RV32I-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; RV32I-WITHFP-NEXT: ret %tmp = alloca [ 305419896 x i8 ] , align 4 ret void @@ -74,9 +79,13 @@ ; RV32I-FPELIM-NEXT: lui a0, 97 ; RV32I-FPELIM-NEXT: addi a0, a0, 672 ; RV32I-FPELIM-NEXT: add sp, sp, a0 +; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset -2032 ; RV32I-FPELIM-NEXT: lw s1, 2024(sp) # 4-byte Folded Reload ; RV32I-FPELIM-NEXT: lw s0, 2028(sp) # 4-byte Folded Reload +; RV32I-FPELIM-NEXT: .cfi_restore s0 +; RV32I-FPELIM-NEXT: .cfi_restore s1 ; RV32I-FPELIM-NEXT: addi sp, sp, 2032 +; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset 0 ; RV32I-FPELIM-NEXT: ret ; ; RV32I-WITHFP-LABEL: test_emergency_spill_slot: @@ -117,8 +126,14 @@ ; RV32I-WITHFP-NEXT: lw s2, 2016(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: lw s1, 2020(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload +; RV32I-WITHFP-NEXT: .cfi_def_cfa sp, 2032 ; RV32I-WITHFP-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload +; RV32I-WITHFP-NEXT: .cfi_restore ra +; RV32I-WITHFP-NEXT: .cfi_restore s0 +; RV32I-WITHFP-NEXT: .cfi_restore s1 +; RV32I-WITHFP-NEXT: .cfi_restore s2 ; RV32I-WITHFP-NEXT: addi sp, sp, 2032 +; RV32I-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; RV32I-WITHFP-NEXT: ret %data = alloca [ 100000 x i32 ] , align 4 %ptr = getelementptr inbounds [100000 x i32], [100000 x i32]* %data, i32 0, i32 80000 diff --git a/llvm/test/CodeGen/RISCV/neg-abs.ll b/llvm/test/CodeGen/RISCV/neg-abs.ll --- a/llvm/test/CodeGen/RISCV/neg-abs.ll +++ b/llvm/test/CodeGen/RISCV/neg-abs.ll @@ -17,6 +17,7 @@ ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: sub a0, a1, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IBT-LABEL: neg_abs32: @@ -24,6 +25,7 @@ ; RV32IBT-NEXT: srai a1, a0, 31 ; RV32IBT-NEXT: xor a0, a0, a1 ; RV32IBT-NEXT: sub a0, a1, a0 +; RV32IBT-NEXT: .cfi_def_cfa_offset 0 ; RV32IBT-NEXT: ret ; ; RV64I-LABEL: neg_abs32: @@ -31,6 +33,7 @@ ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: subw a0, a1, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IBT-LABEL: neg_abs32: @@ -38,6 +41,7 @@ ; RV64IBT-NEXT: sraiw a1, a0, 31 ; RV64IBT-NEXT: xor a0, a0, a1 ; RV64IBT-NEXT: subw a0, a1, a0 +; RV64IBT-NEXT: .cfi_def_cfa_offset 0 ; RV64IBT-NEXT: ret %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true) %neg = sub nsw i32 0, %abs @@ -50,6 +54,7 @@ ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: sub a0, a1, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IBT-LABEL: select_neg_abs32: @@ -57,6 +62,7 @@ ; RV32IBT-NEXT: srai a1, a0, 31 ; RV32IBT-NEXT: xor a0, a0, a1 ; RV32IBT-NEXT: sub a0, a1, a0 +; RV32IBT-NEXT: .cfi_def_cfa_offset 0 ; RV32IBT-NEXT: ret ; ; RV64I-LABEL: select_neg_abs32: @@ -64,6 +70,7 @@ ; 
RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: subw a0, a1, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IBT-LABEL: select_neg_abs32: @@ -71,6 +78,7 @@ ; RV64IBT-NEXT: sraiw a1, a0, 31 ; RV64IBT-NEXT: xor a0, a0, a1 ; RV64IBT-NEXT: subw a0, a1, a0 +; RV64IBT-NEXT: .cfi_def_cfa_offset 0 ; RV64IBT-NEXT: ret %1 = icmp slt i32 %x, 0 %2 = sub nsw i32 0, %x @@ -88,6 +96,7 @@ ; RV32I-NEXT: sub a1, a2, a1 ; RV32I-NEXT: sub a1, a1, a3 ; RV32I-NEXT: sub a0, a2, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IBT-LABEL: neg_abs64: @@ -99,6 +108,7 @@ ; RV32IBT-NEXT: sub a1, a2, a1 ; RV32IBT-NEXT: sub a1, a1, a3 ; RV32IBT-NEXT: sub a0, a2, a0 +; RV32IBT-NEXT: .cfi_def_cfa_offset 0 ; RV32IBT-NEXT: ret ; ; RV64I-LABEL: neg_abs64: @@ -106,6 +116,7 @@ ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: sub a0, a1, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IBT-LABEL: neg_abs64: @@ -113,6 +124,7 @@ ; RV64IBT-NEXT: srai a1, a0, 63 ; RV64IBT-NEXT: xor a0, a0, a1 ; RV64IBT-NEXT: sub a0, a1, a0 +; RV64IBT-NEXT: .cfi_def_cfa_offset 0 ; RV64IBT-NEXT: ret %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true) %neg = sub nsw i64 0, %abs @@ -129,6 +141,7 @@ ; RV32I-NEXT: sub a1, a2, a1 ; RV32I-NEXT: sub a1, a1, a3 ; RV32I-NEXT: sub a0, a2, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IBT-LABEL: select_neg_abs64: @@ -140,6 +153,7 @@ ; RV32IBT-NEXT: sub a1, a2, a1 ; RV32IBT-NEXT: sub a1, a1, a3 ; RV32IBT-NEXT: sub a0, a2, a0 +; RV32IBT-NEXT: .cfi_def_cfa_offset 0 ; RV32IBT-NEXT: ret ; ; RV64I-LABEL: select_neg_abs64: @@ -147,6 +161,7 @@ ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: sub a0, a1, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IBT-LABEL: select_neg_abs64: @@ -154,6 +169,7 @@ ; RV64IBT-NEXT: srai a1, a0, 63 ; RV64IBT-NEXT: xor a0, a0, a1 ; RV64IBT-NEXT: sub a0, a1, a0 +; RV64IBT-NEXT: .cfi_def_cfa_offset 0 ; RV64IBT-NEXT: ret %1 = icmp slt i64 %x, 0 %2 = sub nsw i64 0, %x diff --git a/llvm/test/CodeGen/RISCV/patchable-function-entry.ll b/llvm/test/CodeGen/RISCV/patchable-function-entry.ll --- a/llvm/test/CodeGen/RISCV/patchable-function-entry.ll +++ b/llvm/test/CodeGen/RISCV/patchable-function-entry.ll @@ -18,8 +18,10 @@ ; CHECK-LABEL: f1: ; CHECK-NEXT: .Lfunc_begin1: ; NORVC: addi zero, zero, 0 +; NORVC-NEXT: cfi_def_cfa_offset 0 ; NORVC-NEXT: jalr zero, 0(ra) ; RVC: c.nop +; RVC-NEXT: cfi_def_cfa_offset 0 ; RVC-NEXT: c.jr ra ; CHECK: .section __patchable_function_entries,"awo",@progbits,f1{{$}} ; 32: .p2align 2 @@ -34,8 +36,10 @@ ; CHECK-LABEL: f5: ; CHECK-NEXT: .Lfunc_begin2: ; NORVC-COUNT-5: addi zero, zero, 0 +; NORVC-NEXT: cfi_def_cfa_offset 0 ; NORVC-NEXT: jalr zero, 0(ra) ; RVC-COUNT-5: c.nop +; RVC-NEXT: cfi_def_cfa_offset 0 ; RVC-NEXT: c.jr ra ; CHECK: .section __patchable_function_entries,"aGwo",@progbits,f5,comdat,f5{{$}} ; RV32: .p2align 2 diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll --- a/llvm/test/CodeGen/RISCV/rv32zba.ll +++ b/llvm/test/CodeGen/RISCV/rv32zba.ll @@ -10,12 +10,14 @@ ; RV32I-NEXT: slli a0, a0, 1 ; RV32I-NEXT: add a0, a2, a0 ; RV32I-NEXT: lh a0, 0(a0) +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: sh1add: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh1add a0, a0, a2 ; RV32ZBA-NEXT: lh a0, 0(a0) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %3 = getelementptr inbounds i16, i16* %1, i64 %0 %4 = load i16, i16* %3 @@ -28,12 +30,14 @@ ; 
RV32I-NEXT: slli a0, a0, 2 ; RV32I-NEXT: add a0, a2, a0 ; RV32I-NEXT: lw a0, 0(a0) +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: sh2add: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a0, a0, a2 ; RV32ZBA-NEXT: lw a0, 0(a0) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %3 = getelementptr inbounds i32, i32* %1, i64 %0 %4 = load i32, i32* %3 @@ -47,6 +51,7 @@ ; RV32I-NEXT: add a1, a2, a0 ; RV32I-NEXT: lw a0, 0(a1) ; RV32I-NEXT: lw a1, 4(a1) +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: sh3add: @@ -54,6 +59,7 @@ ; RV32ZBA-NEXT: sh3add a1, a0, a2 ; RV32ZBA-NEXT: lw a0, 0(a1) ; RV32ZBA-NEXT: lw a1, 4(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %3 = getelementptr inbounds i64, i64* %1, i64 %0 %4 = load i64, i64* %3 @@ -66,12 +72,14 @@ ; RV32I-NEXT: addi a2, zero, 6 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul6: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh1add a0, a0, a0 ; RV32ZBA-NEXT: sh1add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 6 %d = add i32 %c, %b @@ -84,12 +92,14 @@ ; RV32I-NEXT: addi a2, zero, 10 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul10: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a0, a0, a0 ; RV32ZBA-NEXT: sh1add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 10 %d = add i32 %c, %b @@ -102,12 +112,14 @@ ; RV32I-NEXT: addi a2, zero, 12 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul12: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh1add a0, a0, a0 ; RV32ZBA-NEXT: sh2add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 12 %d = add i32 %c, %b @@ -120,12 +132,14 @@ ; RV32I-NEXT: addi a2, zero, 18 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul18: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a0, a0, a0 ; RV32ZBA-NEXT: sh1add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 18 %d = add i32 %c, %b @@ -138,12 +152,14 @@ ; RV32I-NEXT: addi a2, zero, 20 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul20: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a0, a0, a0 ; RV32ZBA-NEXT: sh2add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 20 %d = add i32 %c, %b @@ -156,12 +172,14 @@ ; RV32I-NEXT: addi a2, zero, 24 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul24: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh1add a0, a0, a0 ; RV32ZBA-NEXT: sh3add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 24 %d = add i32 %c, %b @@ -174,12 +192,14 @@ ; RV32I-NEXT: addi a2, zero, 36 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul36: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a0, a0, a0 ; RV32ZBA-NEXT: sh2add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 36 %d = add i32 %c, %b @@ -192,12 +212,14 @@ ; RV32I-NEXT: addi a2, zero, 40 ; RV32I-NEXT: mul a0, 
a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul40: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a0, a0, a0 ; RV32ZBA-NEXT: sh3add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 40 %d = add i32 %c, %b @@ -210,12 +232,14 @@ ; RV32I-NEXT: addi a2, zero, 72 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul72: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a0, a0, a0 ; RV32ZBA-NEXT: sh3add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 72 %d = add i32 %c, %b @@ -227,12 +251,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 96 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul96: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh1add a0, a0, a0 ; RV32ZBA-NEXT: slli a0, a0, 5 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 96 ret i32 %c @@ -243,12 +269,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 160 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul160: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a0, a0, a0 ; RV32ZBA-NEXT: slli a0, a0, 5 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 160 ret i32 %c @@ -259,12 +287,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 288 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul288: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a0, a0, a0 ; RV32ZBA-NEXT: slli a0, a0, 5 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 288 ret i32 %c @@ -275,12 +305,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 258 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul258: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: addi a1, zero, 258 ; RV32ZBA-NEXT: mul a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 258 ret i32 %c @@ -291,12 +323,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 260 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul260: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: addi a1, zero, 260 ; RV32ZBA-NEXT: mul a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 260 ret i32 %c @@ -307,12 +341,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 264 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul264: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: addi a1, zero, 264 ; RV32ZBA-NEXT: mul a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 264 ret i32 %c @@ -323,12 +359,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 11 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul11: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a1, a0, a0 ; RV32ZBA-NEXT: sh1add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 11 ret i32 %c @@ -339,12 +377,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 19 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul19: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a1, a0, a0 ; RV32ZBA-NEXT: sh1add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 
%a, 19 ret i32 %c @@ -355,12 +395,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 13 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul13: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh1add a1, a0, a0 ; RV32ZBA-NEXT: sh2add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 13 ret i32 %c @@ -371,12 +413,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 21 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul21: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a1, a0, a0 ; RV32ZBA-NEXT: sh2add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 21 ret i32 %c @@ -387,12 +431,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 37 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul37: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a1, a0, a0 ; RV32ZBA-NEXT: sh2add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 37 ret i32 %c @@ -403,12 +449,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 25 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul25: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh1add a1, a0, a0 ; RV32ZBA-NEXT: sh3add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 25 ret i32 %c @@ -419,12 +467,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 41 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul41: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a1, a0, a0 ; RV32ZBA-NEXT: sh3add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 41 ret i32 %c @@ -435,12 +485,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 73 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul73: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a1, a0, a0 ; RV32ZBA-NEXT: sh3add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 73 ret i32 %c @@ -451,12 +503,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 27 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul27: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a0, a0, a0 ; RV32ZBA-NEXT: sh1add a0, a0, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 27 ret i32 %c @@ -467,12 +521,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 45 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul45: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a0, a0, a0 ; RV32ZBA-NEXT: sh2add a0, a0, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 45 ret i32 %c @@ -483,12 +539,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 81 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul81: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a0, a0, a0 ; RV32ZBA-NEXT: sh3add a0, a0, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 81 ret i32 %c @@ -500,12 +558,14 @@ ; RV32I-NEXT: lui a1, 1 ; RV32I-NEXT: addi a1, a1, 2 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul4098: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: slli a1, a0, 12 ; RV32ZBA-NEXT: sh1add a0, a0, a1 +; RV32ZBA-NEXT: 
.cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 4098 ret i32 %c @@ -517,12 +577,14 @@ ; RV32I-NEXT: lui a1, 1 ; RV32I-NEXT: addi a1, a1, 4 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul4100: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: slli a1, a0, 12 ; RV32ZBA-NEXT: sh2add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 4100 ret i32 %c @@ -534,12 +596,14 @@ ; RV32I-NEXT: lui a1, 1 ; RV32I-NEXT: addi a1, a1, 8 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul4104: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: slli a1, a0, 12 ; RV32ZBA-NEXT: sh3add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 4104 ret i32 %c @@ -551,12 +615,14 @@ ; RV32I-NEXT: lui a1, 1 ; RV32I-NEXT: addi a1, a1, 8 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: add4104: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: addi a1, zero, 1026 ; RV32ZBA-NEXT: sh2add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = add i32 %a, 4104 ret i32 %c @@ -568,12 +634,14 @@ ; RV32I-NEXT: lui a1, 2 ; RV32I-NEXT: addi a1, a1, 16 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: add8208: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: addi a1, zero, 1026 ; RV32ZBA-NEXT: sh3add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = add i32 %a, 8208 ret i32 %c @@ -585,12 +653,14 @@ ; RV32I-NEXT: slli a0, a0, 5 ; RV32I-NEXT: slli a1, a1, 6 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addshl_5_6: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh1add a0, a1, a0 ; RV32ZBA-NEXT: slli a0, a0, 5 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = shl i32 %a, 5 %d = shl i32 %b, 6 @@ -604,12 +674,14 @@ ; RV32I-NEXT: slli a0, a0, 5 ; RV32I-NEXT: slli a1, a1, 7 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addshl_5_7: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a0, a1, a0 ; RV32ZBA-NEXT: slli a0, a0, 5 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = shl i32 %a, 5 %d = shl i32 %b, 7 @@ -623,12 +695,14 @@ ; RV32I-NEXT: slli a0, a0, 5 ; RV32I-NEXT: slli a1, a1, 8 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addshl_5_8: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a0, a1, a0 ; RV32ZBA-NEXT: slli a0, a0, 5 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = shl i32 %a, 5 %d = shl i32 %b, 8 diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll --- a/llvm/test/CodeGen/RISCV/rv32zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll @@ -734,12 +734,14 @@ ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: xor a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBB-LABEL: abs_i32: ; RV32ZBB: # %bb.0: ; RV32ZBB-NEXT: neg a1, a0 ; RV32ZBB-NEXT: max a0, a0, a1 +; RV32ZBB-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBB-NEXT: ret %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true) ret i32 %abs @@ -757,6 +759,7 @@ ; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: neg a1, a1 ; RV32I-NEXT: .LBB19_2: +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBB-LABEL: abs_i64: @@ -768,6 +771,7 @@ ; RV32ZBB-NEXT: add a1, a1, a2 ; RV32ZBB-NEXT: neg a1, a1 ; RV32ZBB-NEXT: .LBB19_2: +; RV32ZBB-NEXT: 
.cfi_def_cfa_offset 0 ; RV32ZBB-NEXT: ret %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true) ret i64 %abs @@ -861,6 +865,7 @@ ; RV32I-NEXT: or a0, a0, a3 ; RV32I-NEXT: or a1, a0, a1 ; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBB-LABEL: bswap_i64: @@ -868,6 +873,7 @@ ; RV32ZBB-NEXT: rev8 a2, a1 ; RV32ZBB-NEXT: rev8 a1, a0 ; RV32ZBB-NEXT: mv a0, a2 +; RV32ZBB-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBB-NEXT: ret %1 = call i64 @llvm.bswap.i64(i64 %a) ret i64 %1 diff --git a/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll b/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll --- a/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll @@ -15,6 +15,7 @@ ; CHECK-NEXT: addw a0, a0, a2 ; CHECK-NEXT: addiw a0, a0, 1 ; CHECK-NEXT: sllw a0, a0, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = mul i32 %x, %x %c = add i32 %b, 1 diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll --- a/llvm/test/CodeGen/RISCV/rv64zba.ll +++ b/llvm/test/CodeGen/RISCV/rv64zba.ll @@ -28,6 +28,7 @@ ; RV64I-NEXT: add a1, a1, a0 ; RV64I-NEXT: ld a0, 0(a1) ; RV64I-NEXT: ld a1, 8(a1) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: slliuw_2: @@ -36,6 +37,7 @@ ; RV64ZBA-NEXT: add a1, a1, a0 ; RV64ZBA-NEXT: ld a0, 0(a1) ; RV64ZBA-NEXT: ld a1, 8(a1) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = zext i32 %0 to i64 %4 = getelementptr inbounds i128, i128* %1, i64 %3 @@ -67,12 +69,14 @@ ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: lb a0, 0(a0) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: adduw_2: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: add.uw a0, a0, a1 ; RV64ZBA-NEXT: lb a0, 0(a0) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = zext i32 %0 to i64 %4 = getelementptr inbounds i8, i8* %1, i64 %3 @@ -103,12 +107,14 @@ ; RV64I-NEXT: ori a0, a0, 1 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: zextw_demandedbits_i64: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: ori a0, a0, 1 ; RV64ZBA-NEXT: zext.w a0, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %2 = and i64 %0, 4294967294 %3 = or i64 %2, 1 @@ -121,12 +127,14 @@ ; RV64I-NEXT: slli a0, a0, 1 ; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: lh a0, 0(a0) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh1add: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a0, a0, a1 ; RV64ZBA-NEXT: lh a0, 0(a0) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = getelementptr inbounds i16, i16* %1, i64 %0 %4 = load i16, i16* %3 @@ -139,12 +147,14 @@ ; RV64I-NEXT: slli a0, a0, 2 ; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: lw a0, 0(a0) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh2add: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a0, a0, a1 ; RV64ZBA-NEXT: lw a0, 0(a0) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = getelementptr inbounds i32, i32* %1, i64 %0 %4 = load i32, i32* %3 @@ -157,12 +167,14 @@ ; RV64I-NEXT: slli a0, a0, 3 ; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: ld a0, 0(a0) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh3add: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a1 ; RV64ZBA-NEXT: ld a0, 0(a0) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = getelementptr inbounds i64, i64* %1, i64 %0 %4 = load i64, i64* %3 @@ -176,12 +188,14 @@ ; RV64I-NEXT: 
srli a0, a0, 31 ; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: lh a0, 0(a0) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh1adduw: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add.uw a0, a0, a1 ; RV64ZBA-NEXT: lh a0, 0(a0) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = zext i32 %0 to i64 %4 = getelementptr inbounds i16, i16* %1, i64 %3 @@ -195,11 +209,13 @@ ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 31 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh1adduw_2: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add.uw a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = shl i64 %0, 1 %4 = and i64 %3, 8589934590 @@ -214,12 +230,14 @@ ; RV64I-NEXT: srli a0, a0, 30 ; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: lw a0, 0(a0) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh2adduw: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add.uw a0, a0, a1 ; RV64ZBA-NEXT: lw a0, 0(a0) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = zext i32 %0 to i64 %4 = getelementptr inbounds i32, i32* %1, i64 %3 @@ -233,11 +251,13 @@ ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 30 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh2adduw_2: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add.uw a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = shl i64 %0, 2 %4 = and i64 %3, 17179869180 @@ -252,12 +272,14 @@ ; RV64I-NEXT: srli a0, a0, 29 ; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: ld a0, 0(a0) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh3adduw: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add.uw a0, a0, a1 ; RV64ZBA-NEXT: ld a0, 0(a0) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = zext i32 %0 to i64 %4 = getelementptr inbounds i64, i64* %1, i64 %3 @@ -271,11 +293,13 @@ ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 29 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh3adduw_2: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add.uw a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = shl i64 %0, 3 %4 = and i64 %3, 34359738360 @@ -296,6 +320,7 @@ ; RV64I-NEXT: sllw a1, a2, a0 ; RV64I-NEXT: sraiw a0, a0, 2 ; RV64I-NEXT: mul a0, a1, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh2add_extra_sext: @@ -304,6 +329,7 @@ ; RV64ZBA-NEXT: sllw a1, a2, a0 ; RV64ZBA-NEXT: sraiw a0, a0, 2 ; RV64ZBA-NEXT: mul a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %a = shl i32 %x, 2 %b = add i32 %a, %y @@ -321,12 +347,14 @@ ; RV64I-NEXT: addi a2, zero, 6 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul6: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a0, a0, a0 ; RV64ZBA-NEXT: sh1add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 6 %d = add i64 %c, %b @@ -339,12 +367,14 @@ ; RV64I-NEXT: addi a2, zero, 10 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul10: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a0, a0, a0 ; RV64ZBA-NEXT: sh1add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 10 %d = add i64 %c, %b @@ -357,12 +387,14 @@ ; RV64I-NEXT: addi a2, zero, 12 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add 
a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul12: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a0, a0, a0 ; RV64ZBA-NEXT: sh2add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 12 %d = add i64 %c, %b @@ -375,12 +407,14 @@ ; RV64I-NEXT: addi a2, zero, 18 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul18: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a0 ; RV64ZBA-NEXT: sh1add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 18 %d = add i64 %c, %b @@ -393,12 +427,14 @@ ; RV64I-NEXT: addi a2, zero, 20 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul20: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a0, a0, a0 ; RV64ZBA-NEXT: sh2add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 20 %d = add i64 %c, %b @@ -411,12 +447,14 @@ ; RV64I-NEXT: addi a2, zero, 24 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul24: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a0, a0, a0 ; RV64ZBA-NEXT: sh3add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 24 %d = add i64 %c, %b @@ -429,12 +467,14 @@ ; RV64I-NEXT: addi a2, zero, 36 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul36: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a0 ; RV64ZBA-NEXT: sh2add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 36 %d = add i64 %c, %b @@ -447,12 +487,14 @@ ; RV64I-NEXT: addi a2, zero, 40 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul40: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a0, a0, a0 ; RV64ZBA-NEXT: sh3add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 40 %d = add i64 %c, %b @@ -465,12 +507,14 @@ ; RV64I-NEXT: addi a2, zero, 72 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul72: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a0 ; RV64ZBA-NEXT: sh3add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 72 %d = add i64 %c, %b @@ -482,12 +526,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 96 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul96: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a0, a0, a0 ; RV64ZBA-NEXT: slli a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 96 ret i64 %c @@ -498,12 +544,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 160 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul160: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a0, a0, a0 ; RV64ZBA-NEXT: slli a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 160 ret i64 %c @@ -514,12 +562,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 288 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul288: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a0 ; RV64ZBA-NEXT: 
slli a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 288 ret i64 %c @@ -530,12 +580,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: slli a0, a0, 1 ; RV64I-NEXT: addi a0, a0, 5 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh1add_imm: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: slli a0, a0, 1 ; RV64ZBA-NEXT: addi a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %a = shl i64 %0, 1 %b = add i64 %a, 5 @@ -547,12 +599,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: slli a0, a0, 2 ; RV64I-NEXT: addi a0, a0, -6 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh2add_imm: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: slli a0, a0, 2 ; RV64ZBA-NEXT: addi a0, a0, -6 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %a = shl i64 %0, 2 %b = add i64 %a, -6 @@ -564,12 +618,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: slli a0, a0, 3 ; RV64I-NEXT: ori a0, a0, 7 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh3add_imm: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: slli a0, a0, 3 ; RV64ZBA-NEXT: ori a0, a0, 7 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %a = shl i64 %0, 3 %b = add i64 %a, 7 @@ -582,12 +638,14 @@ ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 31 ; RV64I-NEXT: addi a0, a0, 11 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh1adduw_imm: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: slli.uw a0, a0, 1 ; RV64ZBA-NEXT: addi a0, a0, 11 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %a = zext i32 %0 to i64 %b = shl i64 %a, 1 @@ -601,12 +659,14 @@ ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 30 ; RV64I-NEXT: addi a0, a0, -12 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh2adduw_imm: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: slli.uw a0, a0, 2 ; RV64ZBA-NEXT: addi a0, a0, -12 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %a = zext i32 %0 to i64 %b = shl i64 %a, 2 @@ -620,12 +680,14 @@ ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 29 ; RV64I-NEXT: addi a0, a0, 13 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh3adduw_imm: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: slli.uw a0, a0, 3 ; RV64ZBA-NEXT: addi a0, a0, 13 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %a = zext i32 %0 to i64 %b = shl i64 %a, 3 @@ -656,12 +718,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 258 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul258: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: addi a1, zero, 258 ; RV64ZBA-NEXT: mul a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 258 ret i64 %c @@ -672,12 +736,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 260 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul260: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: addi a1, zero, 260 ; RV64ZBA-NEXT: mul a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 260 ret i64 %c @@ -688,12 +754,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 264 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul264: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: addi a1, zero, 264 ; RV64ZBA-NEXT: mul a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 264 ret i64 %c @@ -738,12 +806,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 11 ; RV64I-NEXT: mul a0, a0, a1 +; 
RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul11: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a1, a0, a0 ; RV64ZBA-NEXT: sh1add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 11 ret i64 %c @@ -754,12 +824,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 19 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul19: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a1, a0, a0 ; RV64ZBA-NEXT: sh1add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 19 ret i64 %c @@ -770,12 +842,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 13 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul13: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a1, a0, a0 ; RV64ZBA-NEXT: sh2add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 13 ret i64 %c @@ -786,12 +860,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 21 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul21: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a1, a0, a0 ; RV64ZBA-NEXT: sh2add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 21 ret i64 %c @@ -802,12 +878,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 37 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul37: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a1, a0, a0 ; RV64ZBA-NEXT: sh2add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 37 ret i64 %c @@ -818,12 +896,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 25 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul25: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a1, a0, a0 ; RV64ZBA-NEXT: sh3add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 25 ret i64 %c @@ -834,12 +914,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 41 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul41: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a1, a0, a0 ; RV64ZBA-NEXT: sh3add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 41 ret i64 %c @@ -850,12 +932,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 73 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul73: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a1, a0, a0 ; RV64ZBA-NEXT: sh3add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 73 ret i64 %c @@ -866,12 +950,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 27 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul27: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a0 ; RV64ZBA-NEXT: sh1add a0, a0, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 27 ret i64 %c @@ -882,12 +968,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 45 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul45: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a0 ; RV64ZBA-NEXT: sh2add a0, a0, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 45 ret i64 %c @@ -898,12 +986,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi 
a1, zero, 81 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul81: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a0 ; RV64ZBA-NEXT: sh3add a0, a0, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 81 ret i64 %c @@ -915,12 +1005,14 @@ ; RV64I-NEXT: lui a1, 1 ; RV64I-NEXT: addiw a1, a1, 2 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul4098: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: slli a1, a0, 12 ; RV64ZBA-NEXT: sh1add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 4098 ret i64 %c @@ -932,12 +1024,14 @@ ; RV64I-NEXT: lui a1, 1 ; RV64I-NEXT: addiw a1, a1, 4 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul4100: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: slli a1, a0, 12 ; RV64ZBA-NEXT: sh2add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 4100 ret i64 %c @@ -949,12 +1043,14 @@ ; RV64I-NEXT: lui a1, 1 ; RV64I-NEXT: addiw a1, a1, 8 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul4104: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: slli a1, a0, 12 ; RV64ZBA-NEXT: sh3add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 4104 ret i64 %c @@ -965,12 +1061,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 192 ; RV64I-NEXT: mulw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mulw192: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a0, a0, a0 ; RV64ZBA-NEXT: slliw a0, a0, 6 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i32 %a, 192 ret i32 %c @@ -981,12 +1079,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 320 ; RV64I-NEXT: mulw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mulw320: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a0, a0, a0 ; RV64ZBA-NEXT: slliw a0, a0, 6 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i32 %a, 320 ret i32 %c @@ -997,12 +1097,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 576 ; RV64I-NEXT: mulw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mulw576: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a0 ; RV64ZBA-NEXT: slliw a0, a0, 6 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i32 %a, 576 ret i32 %c @@ -1014,12 +1116,14 @@ ; RV64I-NEXT: lui a1, 1 ; RV64I-NEXT: addiw a1, a1, 8 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: add4104: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: addi a1, zero, 1026 ; RV64ZBA-NEXT: sh2add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = add i64 %a, 4104 ret i64 %c @@ -1031,12 +1135,14 @@ ; RV64I-NEXT: lui a1, 2 ; RV64I-NEXT: addiw a1, a1, 16 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: add8208: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: addi a1, zero, 1026 ; RV64ZBA-NEXT: sh3add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = add i64 %a, 8208 ret i64 %c @@ -1048,12 +1154,14 @@ ; RV64I-NEXT: slliw a0, a0, 5 ; RV64I-NEXT: slliw a1, a1, 6 ; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addshl32_5_6: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a0, a1, a0 ; RV64ZBA-NEXT: slliw a0, a0, 5 +; RV64ZBA-NEXT: 
.cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = shl i32 %a, 5 %d = shl i32 %b, 6 @@ -1067,12 +1175,14 @@ ; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: slli a1, a1, 6 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addshl64_5_6: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a0, a1, a0 ; RV64ZBA-NEXT: slli a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = shl i64 %a, 5 %d = shl i64 %b, 6 @@ -1086,12 +1196,14 @@ ; RV64I-NEXT: slliw a0, a0, 5 ; RV64I-NEXT: slliw a1, a1, 7 ; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addshl32_5_7: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a0, a1, a0 ; RV64ZBA-NEXT: slliw a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = shl i32 %a, 5 %d = shl i32 %b, 7 @@ -1105,12 +1217,14 @@ ; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: slli a1, a1, 7 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addshl64_5_7: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a0, a1, a0 ; RV64ZBA-NEXT: slli a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = shl i64 %a, 5 %d = shl i64 %b, 7 @@ -1124,12 +1238,14 @@ ; RV64I-NEXT: slliw a0, a0, 5 ; RV64I-NEXT: slliw a1, a1, 8 ; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addshl32_5_8: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a1, a0 ; RV64ZBA-NEXT: slliw a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = shl i32 %a, 5 %d = shl i32 %b, 8 @@ -1143,12 +1259,14 @@ ; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: slli a1, a1, 8 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addshl64_5_8: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a1, a0 ; RV64ZBA-NEXT: slli a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = shl i64 %a, 5 %d = shl i64 %b, 8 diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll --- a/llvm/test/CodeGen/RISCV/rv64zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll @@ -421,7 +421,9 @@ ; RV64I-NEXT: srli a0, a0, 56 ; RV64I-NEXT: addi a0, a0, -32 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB4_2: ; RV64I-NEXT: addi a0, zero, 32 @@ -431,6 +433,7 @@ ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: srliw a0, a0, 1 ; RV64ZBB-NEXT: clzw a0, a0 +; RV64ZBB-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBB-NEXT: ret %1 = lshr i32 %a, 1 %2 = call i32 @llvm.ctlz.i32(i32 %1, i1 false) @@ -1264,13 +1267,15 @@ ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: xor a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBB-LABEL: abs_i32: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: sext.w a0, a0 ; RV64ZBB-NEXT: neg a1, a0 ; RV64ZBB-NEXT: max a0, a0, a1 +; RV64ZBB-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBB-NEXT: ret %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true) ret i32 %abs @@ -1284,12 +1289,14 @@ ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: xor a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBB-LABEL: abs_i64: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: neg a1, a0 ; RV64ZBB-NEXT: max a0, a0, a1 +; RV64ZBB-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBB-NEXT: ret %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true) ret i64 %abs @@ -1418,11 +1425,13 @@ ; RV64I-NEXT: 
or a0, a0, a3 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBB-LABEL: bswap_i64: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: rev8 a0, a0 +; RV64ZBB-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBB-NEXT: ret %1 = call i64 @llvm.bswap.i64(i64 %a) ret i64 %1 diff --git a/llvm/test/CodeGen/RISCV/rv64zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbp.ll --- a/llvm/test/CodeGen/RISCV/rv64zbp.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbp.ll @@ -2469,11 +2469,13 @@ ; RV64I-NEXT: or a0, a0, a3 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBP-LABEL: bswap_i64: ; RV64ZBP: # %bb.0: ; RV64ZBP-NEXT: rev8 a0, a0 +; RV64ZBP-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBP-NEXT: ret %1 = call i64 @llvm.bswap.i64(i64 %a) ret i64 %1 @@ -2742,11 +2744,13 @@ ; RV64I-NEXT: slliw a0, a0, 16 ; RV64I-NEXT: srliw a1, a1, 16 ; RV64I-NEXT: or a0, a1, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBP-LABEL: bswap_rotr_i32: ; RV64ZBP: # %bb.0: ; RV64ZBP-NEXT: greviw a0, a0, 8 +; RV64ZBP-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBP-NEXT: ret %1 = call i32 @llvm.bswap.i32(i32 %a) %2 = call i32 @llvm.fshr.i32(i32 %1, i32 %1, i32 16) @@ -2768,11 +2772,13 @@ ; RV64I-NEXT: srliw a0, a0, 16 ; RV64I-NEXT: slliw a1, a1, 16 ; RV64I-NEXT: or a0, a1, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBP-LABEL: bswap_rotl_i32: ; RV64ZBP: # %bb.0: ; RV64ZBP-NEXT: greviw a0, a0, 8 +; RV64ZBP-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBP-NEXT: ret %1 = call i32 @llvm.bswap.i32(i32 %a) %2 = call i32 @llvm.fshl.i32(i32 %1, i32 %1, i32 16) @@ -2824,11 +2830,13 @@ ; RV64I-NEXT: slliw a0, a0, 24 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBP-LABEL: bitreverse_bswap_i32: ; RV64ZBP: # %bb.0: ; RV64ZBP-NEXT: greviw a0, a0, 7 +; RV64ZBP-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBP-NEXT: ret %1 = call i32 @llvm.bitreverse.i32(i32 %a) %2 = call i32 @llvm.bswap.i32(i32 %1) @@ -2926,11 +2934,13 @@ ; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: or a0, a0, a3 ; RV64I-NEXT: or a0, a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBP-LABEL: bitreverse_bswap_i64: ; RV64ZBP: # %bb.0: ; RV64ZBP-NEXT: rev.b a0, a0 +; RV64ZBP-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBP-NEXT: ret %1 = call i64 @llvm.bitreverse.i64(i64 %a) %2 = call i64 @llvm.bswap.i64(i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i7 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -25,6 +26,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv1i8( %va, %b, %m, i32 %evl) ret %v @@ -35,6 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -59,6 +63,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -73,6 +78,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -85,6 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -99,6 +106,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -111,6 +119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -127,6 +136,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv2i8( %va, %b, %m, i32 %evl) ret %v @@ -135,8 +145,9 @@ define @vxor_vv_nxv2i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv2i8_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -149,6 +160,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -161,6 +173,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -175,6 +188,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -187,6 +201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -201,6 +216,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -213,6 
+229,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -229,6 +246,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv4i8( %va, %b, %m, i32 %evl) ret %v @@ -239,6 +257,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -251,6 +270,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -263,6 +283,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -277,6 +298,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -289,6 +311,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -303,6 +326,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -315,6 +339,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -331,6 +356,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv8i8( %va, %b, %m, i32 %evl) ret %v @@ -341,6 +367,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -353,6 +380,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -365,6 +393,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -379,6 +408,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, 
e8, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -391,6 +421,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -405,6 +436,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -417,6 +449,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -433,6 +466,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv15i8( %va, %b, %m, i32 %evl) ret %v @@ -443,6 +477,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -455,6 +490,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -467,6 +503,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -481,6 +518,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -493,6 +531,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -507,6 +546,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -519,6 +559,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -535,6 +576,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv16i8( %va, %b, %m, i32 %evl) ret %v @@ -545,6 +587,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -557,6 +600,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -569,6 +613,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -583,6 +628,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -595,6 +641,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -609,6 +656,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -621,6 +669,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -637,6 +686,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv32i8( %va, %b, %m, i32 %evl) ret %v @@ -647,6 +697,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -659,6 +710,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -671,6 +723,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -685,6 +738,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -697,6 +751,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -711,6 +766,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 
0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -723,6 +779,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -739,6 +796,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv64i8( %va, %b, %m, i32 %evl) ret %v @@ -749,6 +807,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -761,6 +820,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -773,6 +833,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -787,6 +848,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -799,6 +861,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -813,6 +876,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -825,6 +889,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -841,6 +906,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv1i16( %va, %b, %m, i32 %evl) ret %v @@ -851,6 +917,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -863,6 +930,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -875,6 +943,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 
%b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -889,6 +958,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -901,6 +971,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -915,6 +986,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -927,6 +999,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -943,6 +1016,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv2i16( %va, %b, %m, i32 %evl) ret %v @@ -953,6 +1027,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -965,6 +1040,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -977,6 +1053,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -991,6 +1068,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1003,6 +1081,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1017,6 +1096,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1029,6 +1109,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1045,6 +1126,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call 
@llvm.vp.xor.nxv4i16( %va, %b, %m, i32 %evl) ret %v @@ -1055,6 +1137,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1067,6 +1150,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1079,6 +1163,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1093,6 +1178,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1105,6 +1191,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1119,6 +1206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1131,6 +1219,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1147,6 +1236,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv8i16( %va, %b, %m, i32 %evl) ret %v @@ -1157,6 +1247,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1169,6 +1260,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1181,6 +1273,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1195,6 +1288,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1207,6 +1301,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement 
undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1221,6 +1316,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1233,6 +1329,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1249,6 +1346,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv16i16( %va, %b, %m, i32 %evl) ret %v @@ -1259,6 +1357,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1271,6 +1370,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1283,6 +1383,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1297,6 +1398,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1309,6 +1411,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1323,6 +1426,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1335,6 +1439,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1351,6 +1456,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv32i16( %va, %b, %m, i32 %evl) ret %v @@ -1361,6 +1467,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1373,6 +1480,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, 
i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1385,6 +1493,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1399,6 +1508,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1411,6 +1521,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1425,6 +1536,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1437,6 +1549,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1453,6 +1566,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv1i32( %va, %b, %m, i32 %evl) ret %v @@ -1463,6 +1577,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1475,6 +1590,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1487,6 +1603,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1501,6 +1618,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1513,6 +1631,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1527,6 +1646,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1539,6 +1659,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = 
insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1555,6 +1676,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv2i32( %va, %b, %m, i32 %evl) ret %v @@ -1565,6 +1687,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1577,6 +1700,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1589,6 +1713,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1603,6 +1728,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1615,6 +1741,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1629,6 +1756,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1641,6 +1769,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1657,6 +1786,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv4i32( %va, %b, %m, i32 %evl) ret %v @@ -1667,6 +1797,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1679,6 +1810,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1691,6 +1823,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1705,6 +1838,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement 
undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1717,6 +1851,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1731,6 +1866,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1743,6 +1879,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1759,6 +1896,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv8i32( %va, %b, %m, i32 %evl) ret %v @@ -1769,6 +1907,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1781,6 +1920,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1793,6 +1933,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1807,6 +1948,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1819,6 +1961,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1833,6 +1976,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1845,6 +1989,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1861,6 +2006,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv16i32( %va, %b, %m, i32 %evl) ret %v @@ -1871,6 +2017,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 
0 %m = shufflevector %head, undef, zeroinitializer @@ -1883,6 +2030,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1895,6 +2043,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1909,6 +2058,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1921,6 +2071,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1935,6 +2086,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1947,6 +2099,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1963,6 +2116,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv1i64( %va, %b, %m, i32 %evl) ret %v @@ -1973,6 +2127,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1993,6 +2148,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v25, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv1i64: @@ -2019,6 +2175,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v25 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv1i64_unmasked: @@ -2039,6 +2196,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2051,6 +2209,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2065,6 +2224,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2077,6 +2237,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2093,6 +2254,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv2i64( %va, %b, %m, i32 %evl) ret %v @@ -2103,6 +2265,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -2123,6 +2286,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v26, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv2i64: @@ -2149,6 +2313,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v26 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv2i64_unmasked: @@ -2169,6 +2334,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2181,6 +2347,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2195,6 +2362,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2207,6 +2375,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2223,6 +2392,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv4i64( %va, %b, %m, i32 %evl) ret %v @@ -2233,6 +2403,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -2253,6 +2424,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v28, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv4i64: @@ -2279,6 +2451,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v28 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv4i64_unmasked: @@ -2299,6 +2472,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 7, i32 0 %vb = shufflevector %elt.head, undef, 
zeroinitializer @@ -2311,6 +2485,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2325,6 +2500,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2337,6 +2513,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2353,6 +2530,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv8i64( %va, %b, %m, i32 %evl) ret %v @@ -2363,6 +2541,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -2383,6 +2562,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv8i64: @@ -2409,6 +2589,7 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv8i64_unmasked: @@ -2429,6 +2610,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2441,6 +2623,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2455,6 +2638,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2467,6 +2651,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir --- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir +++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir @@ -39,6 +39,7 @@ ; CHECK-NEXT: $x10 = frame-destroy SLLI killed $x10, 3 ; CHECK-NEXT: $x2 = frame-destroy ADD $x2, killed $x10 ; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 16 + ; CHECK-NEXT: CFI_INSTRUCTION def_cfa_offset 0 ; CHECK-NEXT: PseudoRET %0:gpr = COPY $x10 %1:gprnox0 = COPY $x11 diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll @@ -16,6 +16,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i64 0) @@ -36,6 +37,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 0) @@ -55,6 +57,7 @@ ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t ; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16* %base, %index, i64 0) @@ -75,6 +78,7 @@ ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t ; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16* %base, %index, i64 0) @@ -95,6 +99,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a1) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 0) @@ -113,6 +118,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a1) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 0, i64 1) @@ -132,6 +138,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv16i16( %val, %val, i16* %base, i64 0) @@ -145,6 +152,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 0) @@ -161,6 +169,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv16i16( %val, %val, i16* %base, i64 %offset, i64 0) @@ -174,6 +183,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv16i16( %val, %val, i16* %base, i64 %offset, %mask, i64 0) @@ -191,6 +201,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v28 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, i64 0) @@ -205,6 +216,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v28, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, 
%mask, i64 0) @@ -222,6 +234,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v28 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, i64 0) @@ -236,6 +249,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v28, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i64 0) diff --git a/llvm/test/CodeGen/RISCV/scalable-vector-struct.ll b/llvm/test/CodeGen/RISCV/scalable-vector-struct.ll --- a/llvm/test/CodeGen/RISCV/scalable-vector-struct.ll +++ b/llvm/test/CodeGen/RISCV/scalable-vector-struct.ll @@ -9,6 +9,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vs1r.v v8, (a1) ; CHECK-NEXT: vs1r.v v9, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: br label %return diff --git a/llvm/test/CodeGen/RISCV/select-binop-identity.ll b/llvm/test/CodeGen/RISCV/select-binop-identity.ll --- a/llvm/test/CodeGen/RISCV/select-binop-identity.ll +++ b/llvm/test/CodeGen/RISCV/select-binop-identity.ll @@ -18,6 +18,7 @@ ; RV32I-NEXT: and a2, a2, a1 ; RV32I-NEXT: .LBB0_2: ; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: and_select_all_ones_i32: @@ -27,6 +28,7 @@ ; RV64I-NEXT: and a2, a2, a1 ; RV64I-NEXT: .LBB0_2: ; RV64I-NEXT: sext.w a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i32 %x, i32 -1 %b = and i32 %a, %y @@ -43,6 +45,7 @@ ; RV32I-NEXT: .LBB1_2: ; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: mv a1, a4 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: and_select_all_ones_i64: @@ -52,6 +55,7 @@ ; RV64I-NEXT: and a2, a2, a1 ; RV64I-NEXT: .LBB1_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i64 -1, i64 %x %b = and i64 %y, %a @@ -66,6 +70,7 @@ ; RV32I-NEXT: or a2, a2, a1 ; RV32I-NEXT: .LBB2_2: ; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: or_select_all_zeros_i32: @@ -75,6 +80,7 @@ ; RV64I-NEXT: or a2, a2, a1 ; RV64I-NEXT: .LBB2_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i32 %x, i32 0 %b = or i32 %y, %a @@ -91,6 +97,7 @@ ; RV32I-NEXT: .LBB3_2: ; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: mv a1, a4 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: or_select_all_zeros_i64: @@ -100,6 +107,7 @@ ; RV64I-NEXT: or a2, a2, a1 ; RV64I-NEXT: .LBB3_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i64 0, i64 %x %b = or i64 %a, %y @@ -114,6 +122,7 @@ ; RV32I-NEXT: xor a2, a2, a1 ; RV32I-NEXT: .LBB4_2: ; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: xor_select_all_zeros_i32: @@ -123,6 +132,7 @@ ; RV64I-NEXT: xor a2, a2, a1 ; RV64I-NEXT: .LBB4_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i32 0, i32 %x %b = xor i32 %y, %a @@ -139,6 +149,7 @@ ; RV32I-NEXT: .LBB5_2: ; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: mv a1, a4 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: xor_select_all_zeros_i64: @@ -148,6 +159,7 @@ ; RV64I-NEXT: xor a2, a2, a1 ; RV64I-NEXT: .LBB5_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; 
RV64I-NEXT: ret %a = select i1 %c, i64 %x, i64 0 %b = xor i64 %a, %y @@ -162,6 +174,7 @@ ; RV32I-NEXT: add a2, a2, a1 ; RV32I-NEXT: .LBB6_2: ; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: add_select_all_zeros_i32: @@ -171,6 +184,7 @@ ; RV64I-NEXT: addw a2, a2, a1 ; RV64I-NEXT: .LBB6_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i32 0, i32 %x %b = add i32 %y, %a @@ -190,6 +204,7 @@ ; RV32I-NEXT: .LBB7_2: ; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: mv a1, a4 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: add_select_all_zeros_i64: @@ -199,6 +214,7 @@ ; RV64I-NEXT: add a2, a2, a1 ; RV64I-NEXT: .LBB7_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i64 %x, i64 0 %b = add i64 %a, %y @@ -213,6 +229,7 @@ ; RV32I-NEXT: sub a2, a2, a1 ; RV32I-NEXT: .LBB8_2: ; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sub_select_all_zeros_i32: @@ -222,6 +239,7 @@ ; RV64I-NEXT: subw a2, a2, a1 ; RV64I-NEXT: .LBB8_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i32 0, i32 %x %b = sub i32 %y, %a @@ -240,6 +258,7 @@ ; RV32I-NEXT: .LBB9_2: ; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: mv a1, a4 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sub_select_all_zeros_i64: @@ -249,6 +268,7 @@ ; RV64I-NEXT: sub a2, a2, a1 ; RV64I-NEXT: .LBB9_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i64 %x, i64 0 %b = sub i64 %y, %a diff --git a/llvm/test/CodeGen/RISCV/select-constant-xor.ll b/llvm/test/CodeGen/RISCV/select-constant-xor.ll --- a/llvm/test/CodeGen/RISCV/select-constant-xor.ll +++ b/llvm/test/CodeGen/RISCV/select-constant-xor.ll @@ -9,6 +9,7 @@ ; CHECK32-NEXT: lui a1, 524288 ; CHECK32-NEXT: addi a1, a1, -1 ; CHECK32-NEXT: xor a0, a0, a1 +; CHECK32-NEXT: .cfi_def_cfa_offset 0 ; CHECK32-NEXT: ret ; ; CHECK64-LABEL: xori64i32: diff --git a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll --- a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll +++ b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll @@ -443,12 +443,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sext_of_not_i32: ; RV64I: # %bb.0: ; RV64I-NEXT: andi a0, a0, 1 ; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %xor = xor i1 %x, 1 %sext = sext i1 %xor to i32 @@ -462,12 +464,14 @@ ; RV32I-NEXT: addi a0, a1, -1 ; RV32I-NEXT: sltu a1, a0, a1 ; RV32I-NEXT: addi a1, a1, -1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sext_of_not_i64: ; RV64I: # %bb.0: ; RV64I-NEXT: andi a0, a0, 1 ; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %xor = xor i1 %x, 1 %sext = sext i1 %xor to i64 @@ -481,6 +485,7 @@ ; RV32I-NEXT: addi a0, a0, -7 ; RV32I-NEXT: snez a0, a0 ; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sext_of_not_cmp_i32: @@ -489,6 +494,7 @@ ; RV64I-NEXT: addi a0, a0, -7 ; RV64I-NEXT: snez a0, a0 ; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %cmp = icmp eq i32 %x, 7 %xor = xor i1 %cmp, 1 @@ -504,6 +510,7 @@ ; RV32I-NEXT: snez a0, a0 ; RV32I-NEXT: neg a0, a0 ; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; 
RV32I-NEXT: ret ; ; RV64I-LABEL: sext_of_not_cmp_i64: @@ -511,6 +518,7 @@ ; RV64I-NEXT: addi a0, a0, -7 ; RV64I-NEXT: snez a0, a0 ; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %cmp = icmp eq i64 %x, 7 %xor = xor i1 %cmp, 1 @@ -525,6 +533,7 @@ ; RV32I-NEXT: addi a0, a0, -7 ; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: dec_of_zexted_cmp_i32: @@ -533,6 +542,7 @@ ; RV64I-NEXT: addi a0, a0, -7 ; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %cmp = icmp eq i32 %x, 7 %zext = zext i1 %cmp to i32 @@ -549,6 +559,7 @@ ; RV32I-NEXT: addi a0, a1, -1 ; RV32I-NEXT: sltu a1, a0, a1 ; RV32I-NEXT: addi a1, a1, -1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: dec_of_zexted_cmp_i64: @@ -556,6 +567,7 @@ ; RV64I-NEXT: addi a0, a0, -7 ; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %cmp = icmp eq i64 %x, 7 %zext = zext i1 %cmp to i64 diff --git a/llvm/test/CodeGen/RISCV/shadowcallstack.ll b/llvm/test/CodeGen/RISCV/shadowcallstack.ll --- a/llvm/test/CodeGen/RISCV/shadowcallstack.ll +++ b/llvm/test/CodeGen/RISCV/shadowcallstack.ll @@ -7,10 +7,12 @@ define void @f1() shadowcallstack { ; RV32-LABEL: f1: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: f1: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ret void } @@ -20,10 +22,12 @@ define void @f2() shadowcallstack { ; RV32-LABEL: f2: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: tail foo@plt ; ; RV64-LABEL: f2: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: tail foo@plt tail call void @foo() ret void @@ -42,9 +46,11 @@ ; RV32-NEXT: .cfi_offset ra, -4 ; RV32-NEXT: call bar@plt ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: lw ra, -4(s2) ; RV32-NEXT: addi s2, s2, -4 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: f3: @@ -57,9 +63,11 @@ ; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: call bar@plt ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_restore ra ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ld ra, -8(s2) ; RV64-NEXT: addi s2, s2, -8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %res = call i32 @bar() %res1 = add i32 %res, 1 @@ -95,9 +103,14 @@ ; RV32-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra +; RV32-NEXT: .cfi_restore s0 +; RV32-NEXT: .cfi_restore s1 +; RV32-NEXT: .cfi_restore s3 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: lw ra, -4(s2) ; RV32-NEXT: addi s2, s2, -4 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: f4: @@ -128,9 +141,14 @@ ; RV64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_restore ra +; RV64-NEXT: .cfi_restore s0 +; RV64-NEXT: .cfi_restore s1 +; RV64-NEXT: .cfi_restore s3 ; RV64-NEXT: addi sp, sp, 32 ; RV64-NEXT: ld ra, -8(s2) ; RV64-NEXT: addi s2, s2, -8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %res1 = call i32 @bar() %res2 = call i32 @bar() diff --git a/llvm/test/CodeGen/RISCV/shift-and.ll b/llvm/test/CodeGen/RISCV/shift-and.ll --- a/llvm/test/CodeGen/RISCV/shift-and.ll +++ 
b/llvm/test/CodeGen/RISCV/shift-and.ll @@ -13,12 +13,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: srli a0, a0, 5 ; RV32I-NEXT: andi a0, a0, -8 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test1: ; RV64I: # %bb.0: ; RV64I-NEXT: srliw a0, a0, 8 ; RV64I-NEXT: slli a0, a0, 3 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = lshr i32 %x, 5 %b = and i32 %a, 134217720 @@ -33,12 +35,14 @@ ; RV32I-NEXT: or a0, a0, a2 ; RV32I-NEXT: srli a1, a1, 5 ; RV32I-NEXT: andi a0, a0, -8 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test2: ; RV64I: # %bb.0: ; RV64I-NEXT: srli a0, a0, 5 ; RV64I-NEXT: andi a0, a0, -8 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = lshr i64 %x, 5 %b = and i64 %a, 576460752303423480 @@ -50,12 +54,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: srli a0, a0, 20 ; RV32I-NEXT: slli a0, a0, 14 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test3: ; RV64I: # %bb.0: ; RV64I-NEXT: srliw a0, a0, 20 ; RV64I-NEXT: slli a0, a0, 14 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = lshr i32 %x, 6 %b = and i32 %a, 67092480 @@ -71,12 +77,14 @@ ; RV32I-NEXT: srli a1, a1, 6 ; RV32I-NEXT: lui a2, 1048572 ; RV32I-NEXT: and a0, a0, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test4: ; RV64I: # %bb.0: ; RV64I-NEXT: srli a0, a0, 20 ; RV64I-NEXT: slli a0, a0, 14 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = lshr i64 %x, 6 %b = and i64 %a, 288230376151695360 @@ -88,6 +96,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: srli a0, a0, 10 ; RV32I-NEXT: slli a0, a0, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test5: @@ -95,6 +104,7 @@ ; RV64I-NEXT: slliw a0, a0, 6 ; RV64I-NEXT: lui a1, 1048560 ; RV64I-NEXT: and a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = shl i32 %x, 6 %b = and i32 %a, -65536 @@ -109,12 +119,14 @@ ; RV32I-NEXT: or a1, a1, a2 ; RV32I-NEXT: srli a0, a0, 10 ; RV32I-NEXT: slli a0, a0, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test6: ; RV64I: # %bb.0: ; RV64I-NEXT: srli a0, a0, 10 ; RV64I-NEXT: slli a0, a0, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = shl i64 %x, 6 %b = and i64 %a, -65536 diff --git a/llvm/test/CodeGen/RISCV/split-offsets.ll b/llvm/test/CodeGen/RISCV/split-offsets.ll --- a/llvm/test/CodeGen/RISCV/split-offsets.ll +++ b/llvm/test/CodeGen/RISCV/split-offsets.ll @@ -22,6 +22,7 @@ ; RV32I-NEXT: sw a3, 4(a0) ; RV32I-NEXT: sw a3, 0(a1) ; RV32I-NEXT: sw a2, 4(a1) +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test1: @@ -37,6 +38,7 @@ ; RV64I-NEXT: sw a3, 4(a0) ; RV64I-NEXT: sw a3, 0(a1) ; RV64I-NEXT: sw a2, 4(a1) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret entry: %s = load [65536 x i32]*, [65536 x i32]** %sp @@ -72,6 +74,7 @@ ; RV32I-NEXT: mv a3, a4 ; RV32I-NEXT: blt a3, a2, .LBB1_1 ; RV32I-NEXT: .LBB1_2: # %while_end +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test2: @@ -96,6 +99,7 @@ ; RV64I-NEXT: sext.w a4, a3 ; RV64I-NEXT: blt a4, a2, .LBB1_1 ; RV64I-NEXT: .LBB1_2: # %while_end +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret entry: %s = load [65536 x i32]*, [65536 x i32]** %sp diff --git a/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll b/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll --- a/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll +++ 
b/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll @@ -30,8 +30,13 @@ ; RV32I-NEXT: addi sp, s0, -64 ; RV32I-NEXT: lw s1, 52(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 56(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 64 ; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 +; RV32I-NEXT: .cfi_restore s1 ; RV32I-NEXT: addi sp, sp, 64 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller: @@ -59,8 +64,13 @@ ; RV64I-NEXT: addi sp, s0, -64 ; RV64I-NEXT: ld s1, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 48(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 64 ; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 +; RV64I-NEXT: .cfi_restore s1 ; RV64I-NEXT: addi sp, sp, 64 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, i32 %n %2 = alloca i32, align 64 diff --git a/llvm/test/CodeGen/RISCV/stack-realignment.ll b/llvm/test/CodeGen/RISCV/stack-realignment.ll --- a/llvm/test/CodeGen/RISCV/stack-realignment.ll +++ b/llvm/test/CodeGen/RISCV/stack-realignment.ll @@ -22,8 +22,12 @@ ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: addi sp, s0, -32 ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 32 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 ; RV32I-NEXT: addi sp, sp, 32 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller32: @@ -41,8 +45,12 @@ ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: addi sp, s0, -32 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 32 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 ; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 32 call void @callee(i8* %1) @@ -59,7 +67,9 @@ ; RV32I-NEXT: mv a0, sp ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller_no_realign32: @@ -71,7 +81,9 @@ ; RV64I-NEXT: mv a0, sp ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 32 call void @callee(i8* %1) @@ -94,8 +106,12 @@ ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: addi sp, s0, -64 ; RV32I-NEXT: lw s0, 56(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 64 ; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 ; RV32I-NEXT: addi sp, sp, 64 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller64: @@ -113,8 +129,12 @@ ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: addi sp, s0, -64 ; RV64I-NEXT: ld s0, 48(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 64 ; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 ; RV64I-NEXT: addi sp, sp, 64 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 64 call void @callee(i8* %1) @@ -131,7 +151,9 @@ ; RV32I-NEXT: mv a0, sp ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; RV32I-NEXT: addi sp, sp, 16 +;
RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller_no_realign64: @@ -143,7 +165,9 @@ ; RV64I-NEXT: mv a0, sp ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 64 call void @callee(i8* %1) @@ -166,8 +190,12 @@ ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: addi sp, s0, -128 ; RV32I-NEXT: lw s0, 120(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 128 ; RV32I-NEXT: lw ra, 124(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 ; RV32I-NEXT: addi sp, sp, 128 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller128: @@ -185,8 +213,12 @@ ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: addi sp, s0, -128 ; RV64I-NEXT: ld s0, 112(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 128 ; RV64I-NEXT: ld ra, 120(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 ; RV64I-NEXT: addi sp, sp, 128 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 128 call void @callee(i8* %1) @@ -203,7 +235,9 @@ ; RV32I-NEXT: mv a0, sp ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller_no_realign128: @@ -215,7 +249,9 @@ ; RV64I-NEXT: mv a0, sp ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 128 call void @callee(i8* %1) @@ -238,8 +274,12 @@ ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: addi sp, s0, -256 ; RV32I-NEXT: lw s0, 248(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 256 ; RV32I-NEXT: lw ra, 252(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 ; RV32I-NEXT: addi sp, sp, 256 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller256: @@ -257,8 +297,12 @@ ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: addi sp, s0, -256 ; RV64I-NEXT: ld s0, 240(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 256 ; RV64I-NEXT: ld ra, 248(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 ; RV64I-NEXT: addi sp, sp, 256 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 256 call void @callee(i8* %1) @@ -275,7 +319,9 @@ ; RV32I-NEXT: mv a0, sp ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller_no_realign256: @@ -287,7 +333,9 @@ ; RV64I-NEXT: mv a0, sp ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 256 call void @callee(i8* %1) @@ -310,8 +358,12 @@ ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: addi sp, s0, -1024 ; RV32I-NEXT: lw s0, 1016(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 1024 ; RV32I-NEXT: lw ra, 1020(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 ; RV32I-NEXT: addi sp, sp, 1024 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ;
RV64I-LABEL: caller512: @@ -329,8 +381,12 @@ ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: addi sp, s0, -1024 ; RV64I-NEXT: ld s0, 1008(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 1024 ; RV64I-NEXT: ld ra, 1016(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 ; RV64I-NEXT: addi sp, sp, 1024 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 512 call void @callee(i8* %1) @@ -347,7 +403,9 @@ ; RV32I-NEXT: mv a0, sp ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller_no_realign512: @@ -359,7 +417,9 @@ ; RV64I-NEXT: mv a0, sp ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 512 call void @callee(i8* %1) @@ -384,8 +444,12 @@ ; RV32I-NEXT: addi sp, s0, -2048 ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 2032 ; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 ; RV32I-NEXT: addi sp, sp, 2032 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller1024: @@ -405,8 +469,12 @@ ; RV64I-NEXT: addi sp, s0, -2048 ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 2032 ; RV64I-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 ; RV64I-NEXT: addi sp, sp, 2032 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 1024 call void @callee(i8* %1) @@ -423,7 +491,9 @@ ; RV32I-NEXT: mv a0, sp ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller_no_realign1024: @@ -435,7 +505,9 @@ ; RV64I-NEXT: mv a0, sp ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 1024 call void @callee(i8* %1) @@ -467,8 +539,12 @@ ; RV32I-NEXT: addi a0, a0, -2032 ; RV32I-NEXT: add sp, sp, a0 ; RV32I-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 2032 ; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 ; RV32I-NEXT: addi sp, sp, 2032 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller2048: @@ -495,8 +571,12 @@ ; RV64I-NEXT: addiw a0, a0, -2032 ; RV64I-NEXT: add sp, sp, a0 ; RV64I-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 2032 ; RV64I-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 ; RV64I-NEXT: addi sp, sp, 2032 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 2048 call void @callee(i8* %1) @@ -513,7 +593,9 @@ ; RV32I-NEXT: mv a0, sp ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller_no_realign2048: @@ -525,7
+607,9 @@ ; RV64I-NEXT: mv a0, sp ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 2048 call void @callee(i8* %1) @@ -557,8 +641,12 @@ ; RV32I-NEXT: addi a0, a0, -2032 ; RV32I-NEXT: add sp, sp, a0 ; RV32I-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 2032 ; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 ; RV32I-NEXT: addi sp, sp, 2032 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller4096: @@ -585,8 +673,12 @@ ; RV64I-NEXT: addiw a0, a0, -2032 ; RV64I-NEXT: add sp, sp, a0 ; RV64I-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 2032 ; RV64I-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 ; RV64I-NEXT: addi sp, sp, 2032 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 4096 call void @callee(i8* %1) @@ -603,7 +695,9 @@ ; RV32I-NEXT: mv a0, sp ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller_no_realign4096: @@ -615,7 +709,9 @@ ; RV64I-NEXT: mv a0, sp ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 4096 call void @callee(i8* %1) diff --git a/llvm/test/CodeGen/RISCV/tail-calls.ll b/llvm/test/CodeGen/RISCV/tail-calls.ll --- a/llvm/test/CodeGen/RISCV/tail-calls.ll +++ b/llvm/test/CodeGen/RISCV/tail-calls.ll @@ -71,6 +71,7 @@ ; CHECK-NEXT: mv a4, a5 ; CHECK-NEXT: mv a5, a6 ; CHECK-NEXT: mv a6, a7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: jr t1 %9 = tail call i32 %0(i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7) ret i32 %9 diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll --- a/llvm/test/CodeGen/RISCV/vararg.ll +++ b/llvm/test/CodeGen/RISCV/vararg.ll @@ -53,6 +53,7 @@ ; ILP32-ILP32F-FPELIM-NEXT: addi a1, sp, 24 ; ILP32-ILP32F-FPELIM-NEXT: sw a1, 12(sp) ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 48 +; ILP32-ILP32F-FPELIM-NEXT: .cfi_def_cfa_offset 0 ; ILP32-ILP32F-FPELIM-NEXT: ret ; ; ILP32-ILP32F-WITHFP-LABEL: va1: @@ -76,8 +77,12 @@ ; ILP32-ILP32F-WITHFP-NEXT: addi a1, s0, 8 ; ILP32-ILP32F-WITHFP-NEXT: sw a1, -12(s0) ; ILP32-ILP32F-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; ILP32-ILP32F-WITHFP-NEXT: .cfi_def_cfa sp, 16 ; ILP32-ILP32F-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; ILP32-ILP32F-WITHFP-NEXT: .cfi_restore ra +; ILP32-ILP32F-WITHFP-NEXT: .cfi_restore s0 ; ILP32-ILP32F-WITHFP-NEXT: addi sp, sp, 48 +; ILP32-ILP32F-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; ILP32-ILP32F-WITHFP-NEXT: ret ; ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-LABEL: va1: @@ -95,6 +100,7 @@ ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a1, sp, 24 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a1, 12(sp) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 48 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: .cfi_def_cfa_offset 0 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-FPELIM-LABEL: va1: @@ -112,6 +118,7 @@ ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: lw a0, 24(sp) ;
LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80 +; LP64-LP64F-LP64D-FPELIM-NEXT: .cfi_def_cfa_offset 0 ; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va1: @@ -135,8 +142,12 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, -24(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: lw a0, 8(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_def_cfa sp, 32 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_restore ra +; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_restore s0 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96 +; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret %va = alloca i8*, align 4 %1 = bitcast i8** %va to i8* @@ -1777,6 +1788,7 @@ ; ILP32-ILP32F-FPELIM-NEXT: lui a1, 24414 ; ILP32-ILP32F-FPELIM-NEXT: addi a1, a1, 304 ; ILP32-ILP32F-FPELIM-NEXT: add sp, sp, a1 +; ILP32-ILP32F-FPELIM-NEXT: .cfi_def_cfa_offset 0 ; ILP32-ILP32F-FPELIM-NEXT: ret ; ; ILP32-ILP32F-WITHFP-LABEL: va_large_stack: @@ -1809,8 +1821,12 @@ ; ILP32-ILP32F-WITHFP-NEXT: addi a1, a1, -1728 ; ILP32-ILP32F-WITHFP-NEXT: add sp, sp, a1 ; ILP32-ILP32F-WITHFP-NEXT: lw s0, 1992(sp) # 4-byte Folded Reload +; ILP32-ILP32F-WITHFP-NEXT: .cfi_def_cfa sp, 2064 ; ILP32-ILP32F-WITHFP-NEXT: lw ra, 1996(sp) # 4-byte Folded Reload +; ILP32-ILP32F-WITHFP-NEXT: .cfi_restore ra +; ILP32-ILP32F-WITHFP-NEXT: .cfi_restore s0 ; ILP32-ILP32F-WITHFP-NEXT: addi sp, sp, 2032 +; ILP32-ILP32F-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; ILP32-ILP32F-WITHFP-NEXT: ret ; ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-LABEL: va_large_stack: @@ -1855,6 +1871,7 @@ ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a1, 24414 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a1, a1, 304 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: add sp, sp, a1 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: .cfi_def_cfa_offset 0 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-FPELIM-LABEL: va_large_stack: @@ -1902,6 +1919,7 @@ ; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414 ; LP64-LP64F-LP64D-FPELIM-NEXT: addiw a1, a1, 336 ; LP64-LP64F-LP64D-FPELIM-NEXT: add sp, sp, a1 +; LP64-LP64F-LP64D-FPELIM-NEXT: .cfi_def_cfa_offset 0 ; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va_large_stack: @@ -1934,8 +1952,12 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: addiw a1, a1, -1680 ; LP64-LP64F-LP64D-WITHFP-NEXT: add sp, sp, a1 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 1952(sp) # 8-byte Folded Reload +; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_def_cfa sp, 2096 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 1960(sp) # 8-byte Folded Reload +; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_restore ra +; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_restore s0 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 2032 +; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret %large = alloca [ 100000000 x i8 ] %va = alloca i8*, align 4 diff --git a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll --- a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll +++ b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll @@ -45,6 +45,7 @@ ; RV32-NEXT: .LBB0_8: ; RV32-NEXT: sb a0, 2(a1) ; RV32-NEXT: sh a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vec3_setcc_crash: @@ -83,6 +84,7 @@ ; RV64-NEXT: .LBB0_8: ; RV64-NEXT: sb a0, 2(a1) ; RV64-NEXT: sh a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <3 x i8>, <3 x i8>* %in %cmp = icmp sgt <3 x i8> %a, zeroinitializer diff --git 
a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll --- a/llvm/test/CodeGen/RISCV/xaluo.ll +++ b/llvm/test/CodeGen/RISCV/xaluo.ll @@ -15,6 +15,7 @@ ; RV32-NEXT: slti a1, a1, 0 ; RV32-NEXT: xor a0, a1, a0 ; RV32-NEXT: sw a3, 0(a2) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo1.i32: @@ -26,6 +27,7 @@ ; RV64-NEXT: xor a0, a0, a3 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a3, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo1.i32: @@ -35,6 +37,7 @@ ; RV32ZBA-NEXT: slti a1, a1, 0 ; RV32ZBA-NEXT: xor a0, a1, a0 ; RV32ZBA-NEXT: sw a3, 0(a2) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo1.i32: @@ -62,6 +65,7 @@ ; RV32-NEXT: addi a2, a0, 4 ; RV32-NEXT: slt a0, a2, a0 ; RV32-NEXT: sw a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo2.i32: @@ -72,6 +76,7 @@ ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo2.i32: @@ -79,6 +84,7 @@ ; RV32ZBA-NEXT: addi a2, a0, 4 ; RV32ZBA-NEXT: slt a0, a2, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo2.i32: @@ -106,6 +112,7 @@ ; RV32-NEXT: slt a0, a2, a0 ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: sw a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo3.i32: @@ -116,6 +123,7 @@ ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo3.i32: @@ -124,6 +132,7 @@ ; RV32ZBA-NEXT: slt a0, a2, a0 ; RV32ZBA-NEXT: xori a0, a0, 1 ; RV32ZBA-NEXT: sw a2, 0(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo3.i32: @@ -152,6 +161,7 @@ ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: slt a0, a2, a0 ; RV32-NEXT: sw a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo4.i32: @@ -164,6 +174,7 @@ ; RV64-NEXT: xor a0, a0, a3 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a3, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo4.i32: @@ -173,6 +184,7 @@ ; RV32ZBA-NEXT: add a2, a0, a2 ; RV32ZBA-NEXT: slt a0, a2, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo4.i32: @@ -208,6 +220,7 @@ ; RV32-NEXT: slti a0, a0, 0 ; RV32-NEXT: sw a2, 0(a4) ; RV32-NEXT: sw a5, 4(a4) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo1.i64: @@ -217,6 +230,7 @@ ; RV64-NEXT: slti a1, a1, 0 ; RV64-NEXT: xor a0, a1, a0 ; RV64-NEXT: sd a3, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo1.i64: @@ -232,6 +246,7 @@ ; RV32ZBA-NEXT: slti a0, a0, 0 ; RV32ZBA-NEXT: sw a2, 0(a4) ; RV32ZBA-NEXT: sw a5, 4(a4) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo1.i64: @@ -262,6 +277,7 @@ ; RV32-NEXT: slti a0, a0, 0 ; RV32-NEXT: sw a3, 0(a2) ; RV32-NEXT: sw a4, 4(a2) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo2.i64: @@ -269,6 +285,7 @@ ; RV64-NEXT: addi a2, a0, 4 ; RV64-NEXT: slt a0, a2, a0 ; RV64-NEXT: sd a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo2.i64: @@ -282,6 +299,7 @@ ; RV32ZBA-NEXT: slti a0, a0, 0 ; RV32ZBA-NEXT: sw a3, 0(a2) ; RV32ZBA-NEXT: sw a4, 4(a2) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo2.i64: @@ -310,6 +328,7 
@@ ; RV32-NEXT: slti a0, a0, 0 ; RV32-NEXT: sw a3, 0(a2) ; RV32-NEXT: sw a4, 4(a2) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo3.i64: @@ -318,6 +337,7 @@ ; RV64-NEXT: slt a0, a2, a0 ; RV64-NEXT: xori a0, a0, 1 ; RV64-NEXT: sd a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo3.i64: @@ -331,6 +351,7 @@ ; RV32ZBA-NEXT: slti a0, a0, 0 ; RV32ZBA-NEXT: sw a3, 0(a2) ; RV32ZBA-NEXT: sw a4, 4(a2) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo3.i64: @@ -354,6 +375,7 @@ ; RV32-NEXT: add a1, a0, a1 ; RV32-NEXT: sltu a0, a1, a0 ; RV32-NEXT: sw a1, 0(a2) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.i32: @@ -362,6 +384,7 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sltu a0, a1, a0 ; RV64-NEXT: sw a1, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i32: @@ -369,6 +392,7 @@ ; RV32ZBA-NEXT: add a1, a0, a1 ; RV32ZBA-NEXT: sltu a0, a1, a0 ; RV32ZBA-NEXT: sw a1, 0(a2) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.i32: @@ -392,6 +416,7 @@ ; RV32-NEXT: addi a2, a0, -2 ; RV32-NEXT: sltu a0, a2, a0 ; RV32-NEXT: sw a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.i32.constant: @@ -400,6 +425,7 @@ ; RV64-NEXT: addiw a3, a0, -2 ; RV64-NEXT: sltu a0, a3, a2 ; RV64-NEXT: sw a3, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i32.constant: @@ -407,6 +433,7 @@ ; RV32ZBA-NEXT: addi a2, a0, -2 ; RV32ZBA-NEXT: sltu a0, a2, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.i32.constant: @@ -437,6 +464,7 @@ ; RV32-NEXT: .LBB9_2: # %entry ; RV32-NEXT: sw a2, 0(a4) ; RV32-NEXT: sw a3, 4(a4) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.i64: @@ -444,6 +472,7 @@ ; RV64-NEXT: add a1, a0, a1 ; RV64-NEXT: sltu a0, a1, a0 ; RV64-NEXT: sd a1, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i64: @@ -458,6 +487,7 @@ ; RV32ZBA-NEXT: .LBB9_2: # %entry ; RV32ZBA-NEXT: sw a2, 0(a4) ; RV32ZBA-NEXT: sw a3, 4(a4) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.i64: @@ -482,6 +512,7 @@ ; RV32-NEXT: slt a0, a1, a0 ; RV32-NEXT: xor a0, a3, a0 ; RV32-NEXT: sw a1, 0(a2) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo1.i32: @@ -493,6 +524,7 @@ ; RV64-NEXT: xor a0, a0, a3 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a3, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo1.i32: @@ -502,6 +534,7 @@ ; RV32ZBA-NEXT: slt a0, a1, a0 ; RV32ZBA-NEXT: xor a0, a3, a0 ; RV32ZBA-NEXT: sw a1, 0(a2) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo1.i32: @@ -528,6 +561,7 @@ ; RV32-NEXT: addi a2, a0, 4 ; RV32-NEXT: slt a0, a2, a0 ; RV32-NEXT: sw a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo2.i32: @@ -538,6 +572,7 @@ ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo2.i32: @@ -545,6 +580,7 @@ ; RV32ZBA-NEXT: addi a2, a0, 4 ; RV32ZBA-NEXT: slt a0, a2, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo2.i32: @@ -578,6 +614,7 @@ ; RV32-NEXT: sw a0, 0(a4) ; RV32-NEXT: sw a5, 4(a4) ; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .cfi_def_cfa_offset 0 
; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.i64: @@ -587,6 +624,7 @@ ; RV64-NEXT: slt a0, a1, a0 ; RV64-NEXT: xor a0, a3, a0 ; RV64-NEXT: sd a1, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.i64: @@ -602,6 +640,7 @@ ; RV32ZBA-NEXT: sw a0, 0(a4) ; RV32ZBA-NEXT: sw a5, 4(a4) ; RV32ZBA-NEXT: mv a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.i64: @@ -626,6 +665,7 @@ ; RV32-NEXT: sub a1, a0, a1 ; RV32-NEXT: sltu a0, a0, a1 ; RV32-NEXT: sw a1, 0(a2) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.i32: @@ -634,6 +674,7 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sltu a0, a0, a1 ; RV64-NEXT: sw a1, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.i32: @@ -641,6 +682,7 @@ ; RV32ZBA-NEXT: sub a1, a0, a1 ; RV32ZBA-NEXT: sltu a0, a0, a1 ; RV32ZBA-NEXT: sw a1, 0(a2) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.i32: @@ -664,6 +706,7 @@ ; RV32-NEXT: addi a2, a0, 2 ; RV32-NEXT: sltu a0, a0, a2 ; RV32-NEXT: sw a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.i32.constant.rhs: @@ -672,6 +715,7 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sltu a0, a0, a2 ; RV64-NEXT: sw a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.i32.constant.rhs: @@ -679,6 +723,7 @@ ; RV32ZBA-NEXT: addi a2, a0, 2 ; RV32ZBA-NEXT: sltu a0, a0, a2 ; RV32ZBA-NEXT: sw a2, 0(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.i32.constant.rhs: @@ -704,6 +749,7 @@ ; RV32-NEXT: addi a0, a2, 1 ; RV32-NEXT: seqz a0, a0 ; RV32-NEXT: sw a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.i32.constant.lhs: @@ -713,6 +759,7 @@ ; RV64-NEXT: addi a0, a2, 1 ; RV64-NEXT: seqz a0, a0 ; RV64-NEXT: sw a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.i32.constant.lhs: @@ -722,6 +769,7 @@ ; RV32ZBA-NEXT: addi a0, a2, 1 ; RV32ZBA-NEXT: seqz a0, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.i32.constant.lhs: @@ -756,6 +804,7 @@ ; RV32-NEXT: .LBB16_3: # %entry ; RV32-NEXT: sw a2, 0(a4) ; RV32-NEXT: sw a3, 4(a4) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.i64: @@ -763,6 +812,7 @@ ; RV64-NEXT: sub a1, a0, a1 ; RV64-NEXT: sltu a0, a0, a1 ; RV64-NEXT: sd a1, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.i64: @@ -780,6 +830,7 @@ ; RV32ZBA-NEXT: .LBB16_3: # %entry ; RV32ZBA-NEXT: sw a2, 0(a4) ; RV32ZBA-NEXT: sw a3, 4(a4) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.i64: @@ -805,6 +856,7 @@ ; RV32-NEXT: xor a0, a3, a0 ; RV32-NEXT: snez a0, a0 ; RV32-NEXT: sw a1, 0(a2) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.i32: @@ -816,6 +868,7 @@ ; RV64-NEXT: xor a0, a0, a3 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a3, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.i32: @@ -826,6 +879,7 @@ ; RV32ZBA-NEXT: xor a0, a3, a0 ; RV32ZBA-NEXT: snez a0, a0 ; RV32ZBA-NEXT: sw a1, 0(a2) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.i32: @@ -856,6 +910,7 @@ ; RV32-NEXT: xor a0, a3, a0 ; RV32-NEXT: snez a0, a0 ; RV32-NEXT: sw a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo2.i32: @@ -867,6 +922,7 @@ ; RV64-NEXT: xor a0, a0, a3 
; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a3, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo2.i32: @@ -878,6 +934,7 @@ ; RV32ZBA-NEXT: xor a0, a3, a0 ; RV32ZBA-NEXT: snez a0, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo2.i32: @@ -963,7 +1020,12 @@ ; RV32-NEXT: lw s2, 4(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore s0 +; RV32-NEXT: .cfi_restore s1 +; RV32-NEXT: .cfi_restore s2 +; RV32-NEXT: .cfi_restore s3 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.i64: @@ -974,6 +1036,7 @@ ; RV64-NEXT: xor a0, a3, a0 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sd a1, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.i64: @@ -1040,6 +1103,11 @@ ; RV32ZBA-NEXT: lw s2, 4(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: lw s0, 12(sp) # 4-byte Folded Reload +; RV32ZBA-NEXT: .cfi_restore s0 +; RV32ZBA-NEXT: .cfi_restore s1 +; RV32ZBA-NEXT: .cfi_restore s2 +; RV32ZBA-NEXT: .cfi_restore s3 ; RV32ZBA-NEXT: addi sp, sp, 16 ; RV32ZBA-NEXT: ret ; @@ -1085,6 +1153,7 @@ ; RV32-NEXT: sw a0, 0(a2) ; RV32-NEXT: sw t0, 4(a2) ; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo2.i64: @@ -1096,6 +1165,7 @@ ; RV64-NEXT: xor a0, a3, a0 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sd a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo2.i64: @@ -1122,6 +1192,7 @@ ; RV32ZBA-NEXT: sw a0, 0(a2) ; RV32ZBA-NEXT: sw t0, 4(a2) ; RV32ZBA-NEXT: mv a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo2.i64: @@ -1150,6 +1221,7 @@ ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: sw a0, 0(a2) ; RV32-NEXT: mv a0, a3 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.i32: @@ -1160,6 +1232,7 @@ ; RV64-NEXT: srli a0, a1, 32 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a1, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.i32: @@ -1169,6 +1242,7 @@ ; RV32ZBA-NEXT: mul a0, a0, a1 ; RV32ZBA-NEXT: sw a0, 0(a2) ; RV32ZBA-NEXT: mv a0, a3 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.i32: @@ -1197,6 +1271,7 @@ ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: sw a0, 0(a1) ; RV32-NEXT: mv a0, a2 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo2.i32: @@ -1208,6 +1283,7 @@ ; RV64-NEXT: srli a0, a2, 32 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo2.i32: @@ -1218,6 +1294,7 @@ ; RV32ZBA-NEXT: mul a0, a0, a3 ; RV32ZBA-NEXT: sw a0, 0(a1) ; RV32ZBA-NEXT: mv a0, a2 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo2.i32: @@ -1246,6 +1323,7 @@ ; RV32-NEXT: snez a0, a0 ; RV32-NEXT: sw a0, 0(a2) ; RV32-NEXT: mv a0, a3 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo3.i32: @@ -1257,6 +1335,7 @@ ; RV64-NEXT: snez a1, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sw a1, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo3.i32: @@ -1266,6 +1345,7 @@ ; RV32ZBA-NEXT: snez a0, a0 ; RV32ZBA-NEXT: sw a0, 0(a2) ; RV32ZBA-NEXT: mv a0, a3 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo3.i32: @@
-1309,6 +1389,7 @@ ; RV32-NEXT: sw a0, 0(a4) ; RV32-NEXT: sw a6, 4(a4) ; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.i64: @@ -1318,6 +1399,7 @@ ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: sd a0, 0(a2) ; RV64-NEXT: mv a0, a3 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.i64: @@ -1342,6 +1424,7 @@ ; RV32ZBA-NEXT: sw a0, 0(a4) ; RV32ZBA-NEXT: sw a6, 4(a4) ; RV32ZBA-NEXT: mv a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.i64: @@ -1375,6 +1458,7 @@ ; RV32-NEXT: sw a0, 0(a2) ; RV32-NEXT: sw a4, 4(a2) ; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo2.i64: @@ -1385,6 +1469,7 @@ ; RV64-NEXT: mul a0, a0, a3 ; RV64-NEXT: sd a0, 0(a1) ; RV64-NEXT: mv a0, a2 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo2.i64: @@ -1401,6 +1486,7 @@ ; RV32ZBA-NEXT: sw a0, 0(a2) ; RV32ZBA-NEXT: sw a4, 4(a2) ; RV32ZBA-NEXT: mv a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo2.i64: @@ -1434,6 +1520,7 @@ ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB26_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.select.i32: @@ -1446,6 +1533,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB26_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.select.i32: @@ -1457,6 +1545,7 @@ ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB26_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.select.i32: @@ -1485,6 +1574,7 @@ ; RV32-NEXT: slti a1, a1, 0 ; RV32-NEXT: xor a0, a1, a0 ; RV32-NEXT: xori a0, a0, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.not.i32: @@ -1495,6 +1585,7 @@ ; RV64-NEXT: addw a0, a0, a1 ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.not.i32: @@ -1504,6 +1595,7 @@ ; RV32ZBA-NEXT: slti a1, a1, 0 ; RV32ZBA-NEXT: xor a0, a1, a0 ; RV32ZBA-NEXT: xori a0, a0, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.not.i32: @@ -1538,6 +1630,7 @@ ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB28_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.select.i64: @@ -1549,6 +1642,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB28_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.select.i64: @@ -1566,6 +1660,7 @@ ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 ; RV32ZBA-NEXT: .LBB28_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.select.i64: @@ -1598,6 +1693,7 @@ ; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: addi a1, zero, -1 ; RV32-NEXT: slt a0, a1, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.not.i64: @@ -1607,6 +1703,7 @@ ; RV64-NEXT: slti a1, a1, 0 ; RV64-NEXT: xor a0, a1, a0 ; RV64-NEXT: xori a0, a0, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.not.i64: @@ -1621,6 +1718,7 @@ ; RV32ZBA-NEXT: and a0, a1, a0 ; RV32ZBA-NEXT: addi a1, zero, -1 ; RV32ZBA-NEXT: slt a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.not.i64: @@ -1646,6 +1744,7 @@ ; RV32-NEXT: # %bb.1: # %entry ; 
RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB30_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.select.i32: @@ -1656,6 +1755,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB30_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.select.i32: @@ -1665,6 +1765,7 @@ ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB30_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.select.i32: @@ -1689,6 +1790,7 @@ ; RV32-NEXT: add a1, a0, a1 ; RV32-NEXT: sltu a0, a1, a0 ; RV32-NEXT: xori a0, a0, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.not.i32: @@ -1697,6 +1799,7 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sltu a0, a1, a0 ; RV64-NEXT: xori a0, a0, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.not.i32: @@ -1704,6 +1807,7 @@ ; RV32ZBA-NEXT: add a1, a0, a1 ; RV32ZBA-NEXT: sltu a0, a1, a0 ; RV32ZBA-NEXT: xori a0, a0, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.not.i32: @@ -1731,6 +1835,7 @@ ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: beqz a4, .LBB32_4 ; RV32-NEXT: .LBB32_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB32_3: # %entry ; RV32-NEXT: sltu a4, a5, a1 @@ -1738,6 +1843,7 @@ ; RV32-NEXT: .LBB32_4: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.select.i64: @@ -1747,6 +1853,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB32_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.select.i64: @@ -1759,6 +1866,7 @@ ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: beqz a4, .LBB32_4 ; RV32ZBA-NEXT: .LBB32_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB32_3: # %entry ; RV32ZBA-NEXT: sltu a4, a5, a1 @@ -1766,6 +1874,7 @@ ; RV32ZBA-NEXT: .LBB32_4: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.select.i64: @@ -1795,6 +1904,7 @@ ; RV32-NEXT: sltu a0, a2, a1 ; RV32-NEXT: .LBB33_2: # %entry ; RV32-NEXT: xori a0, a0, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.not.i64: @@ -1802,6 +1912,7 @@ ; RV64-NEXT: add a1, a0, a1 ; RV64-NEXT: sltu a0, a1, a0 ; RV64-NEXT: xori a0, a0, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.not.i64: @@ -1815,6 +1926,7 @@ ; RV32ZBA-NEXT: sltu a0, a2, a1 ; RV32ZBA-NEXT: .LBB33_2: # %entry ; RV32ZBA-NEXT: xori a0, a0, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.not.i64: @@ -1840,6 +1952,7 @@ ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB34_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.select.i32: @@ -1852,6 +1965,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB34_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.select.i32: @@ -1863,6 +1977,7 @@ ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB34_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.select.i32: @@ -1891,6 +2006,7 @@ ; RV32-NEXT: slt a0, a1, a0 ; RV32-NEXT: xor a0, a2, a0 ; RV32-NEXT: xori a0, a0, 1 +; RV32-NEXT: 
.cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.not.i32: @@ -1901,6 +2017,7 @@ ; RV64-NEXT: subw a0, a0, a1 ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.not.i32: @@ -1910,6 +2027,7 @@ ; RV32ZBA-NEXT: slt a0, a1, a0 ; RV32ZBA-NEXT: xor a0, a2, a0 ; RV32ZBA-NEXT: xori a0, a0, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.not.i32: @@ -1942,6 +2060,7 @@ ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB36_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.select.i64: @@ -1953,6 +2072,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB36_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.select.i64: @@ -1968,6 +2088,7 @@ ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 ; RV32ZBA-NEXT: .LBB36_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.select.i64: @@ -1998,6 +2119,7 @@ ; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: addi a1, zero, -1 ; RV32-NEXT: slt a0, a1, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssub.not.i64: @@ -2007,6 +2129,7 @@ ; RV64-NEXT: slt a0, a1, a0 ; RV64-NEXT: xor a0, a2, a0 ; RV64-NEXT: xori a0, a0, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssub.not.i64: @@ -2019,6 +2142,7 @@ ; RV32ZBA-NEXT: and a0, a1, a0 ; RV32ZBA-NEXT: addi a1, zero, -1 ; RV32ZBA-NEXT: slt a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssub.not.i64: @@ -2044,6 +2168,7 @@ ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB38_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.select.i32: @@ -2054,6 +2179,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB38_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.select.i32: @@ -2063,6 +2189,7 @@ ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB38_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.select.i32: @@ -2087,6 +2214,7 @@ ; RV32-NEXT: sub a1, a0, a1 ; RV32-NEXT: sltu a0, a0, a1 ; RV32-NEXT: xori a0, a0, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.not.i32: @@ -2095,6 +2223,7 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sltu a0, a0, a1 ; RV64-NEXT: xori a0, a0, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.not.i32: @@ -2102,6 +2231,7 @@ ; RV32ZBA-NEXT: sub a1, a0, a1 ; RV32ZBA-NEXT: sltu a0, a0, a1 ; RV32ZBA-NEXT: xori a0, a0, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.not.i32: @@ -2137,6 +2267,7 @@ ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB40_4: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.select.i64: @@ -2146,6 +2277,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB40_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.select.i64: @@ -2166,6 +2298,7 @@ ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 ; RV32ZBA-NEXT: .LBB40_4: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.select.i64: @@ -2193,11 +2326,13 @@ ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a1, a3 ; RV32-NEXT: xori a0, a0, 1 
+; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB41_2: ; RV32-NEXT: sub a1, a0, a2 ; RV32-NEXT: sltu a0, a0, a1 ; RV32-NEXT: xori a0, a0, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.not.i64: @@ -2205,6 +2340,7 @@ ; RV64-NEXT: sub a1, a0, a1 ; RV64-NEXT: sltu a0, a0, a1 ; RV64-NEXT: xori a0, a0, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.not.i64: @@ -2216,11 +2352,13 @@ ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a1, a3 ; RV32ZBA-NEXT: xori a0, a0, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB41_2: ; RV32ZBA-NEXT: sub a1, a0, a2 ; RV32ZBA-NEXT: sltu a0, a0, a1 ; RV32ZBA-NEXT: xori a0, a0, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.not.i64: @@ -2246,6 +2384,7 @@ ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB42_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.select.i32: @@ -2258,6 +2397,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB42_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.select.i32: @@ -2269,6 +2409,7 @@ ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB42_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.select.i32: @@ -2297,6 +2438,7 @@ ; RV32-NEXT: srai a0, a0, 31 ; RV32-NEXT: xor a0, a2, a0 ; RV32-NEXT: seqz a0, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.not.i32: @@ -2307,6 +2449,7 @@ ; RV64-NEXT: mulw a0, a0, a1 ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.not.i32: @@ -2316,6 +2459,7 @@ ; RV32ZBA-NEXT: srai a0, a0, 31 ; RV32ZBA-NEXT: xor a0, a2, a0 ; RV32ZBA-NEXT: seqz a0, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.not.i32: @@ -2396,7 +2540,11 @@ ; RV32-NEXT: lw s2, 4(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore s0 +; RV32-NEXT: .cfi_restore s1 +; RV32-NEXT: .cfi_restore s2 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.select.i64: @@ -2408,6 +2556,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB44_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.select.i64: @@ -2471,6 +2620,11 @@ ; RV32ZBA-NEXT: lw s2, 4(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: lw s0, 12(sp) # 4-byte Folded Reload +; RV32ZBA-NEXT: .cfi_restore s0 +; RV32ZBA-NEXT: .cfi_restore s1 +; RV32ZBA-NEXT: .cfi_restore s2 +; RV32ZBA-NEXT: .cfi_restore s3 ; RV32ZBA-NEXT: addi sp, sp, 16 ; RV32ZBA-NEXT: ret ; @@ -2549,7 +2703,11 @@ ; RV32-NEXT: lw s2, 4(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore s0 +; RV32-NEXT: .cfi_restore s1 +; RV32-NEXT: .cfi_restore s2 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.not.i64: @@ -2559,6 +2717,7 @@ ; RV64-NEXT: srai a0, a0, 63 ; RV64-NEXT: xor a0, a2, a0 ; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL:
smulo.not.i64: @@ -2618,6 +2777,11 @@ ; RV32ZBA-NEXT: lw s2, 4(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: lw s0, 12(sp) # 4-byte Folded Reload +; RV32ZBA-NEXT: .cfi_restore s0 +; RV32ZBA-NEXT: .cfi_restore s1 +; RV32ZBA-NEXT: .cfi_restore s2 +; RV32ZBA-NEXT: .cfi_restore s3 ; RV32ZBA-NEXT: addi sp, sp, 16 ; RV32ZBA-NEXT: ret ; @@ -2644,6 +2808,7 @@ ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB46_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.select.i32: @@ -2656,6 +2821,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB46_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.select.i32: @@ -2665,6 +2831,7 @@ ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB46_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.select.i32: @@ -2690,6 +2857,7 @@ ; RV32: # %bb.0: # %entry ; RV32-NEXT: mulhu a0, a0, a1 ; RV32-NEXT: seqz a0, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.not.i32: @@ -2699,12 +2867,14 @@ ; RV64-NEXT: mulhu a0, a0, a1 ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.not.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mulhu a0, a0, a1 ; RV32ZBA-NEXT: seqz a0, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.not.i32: @@ -2746,6 +2916,7 @@ ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB48_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.select.i64: @@ -2755,6 +2926,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB48_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.select.i64: @@ -2780,6 +2952,7 @@ ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 ; RV32ZBA-NEXT: .LBB48_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.select.i64: @@ -2817,12 +2990,14 @@ ; RV32-NEXT: or a0, a1, a0 ; RV32-NEXT: or a0, a0, a6 ; RV32-NEXT: xori a0, a0, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.not.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: mulhu a0, a0, a1 ; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.not.i64: @@ -2844,6 +3019,7 @@ ; RV32ZBA-NEXT: or a0, a1, a0 ; RV32ZBA-NEXT: or a0, a0, a6 ; RV32ZBA-NEXT: xori a0, a0, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.not.i64: @@ -2871,9 +3047,11 @@ ; RV32-NEXT: beq a1, a0, .LBB50_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: mv a0, zero +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB50_2: # %continue ; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.br.i32: @@ -2885,9 +3063,11 @@ ; RV64-NEXT: beq a0, a2, .LBB50_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB50_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.br.i32: @@ -2898,9 +3078,11 @@ ; RV32ZBA-NEXT: beq a1, a0, .LBB50_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT:
ret ; RV32ZBA-NEXT: .LBB50_2: # %continue ; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.br.i32: @@ -2943,9 +3125,11 @@ ; RV32-NEXT: bgez a0, .LBB51_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: mv a0, zero +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB51_2: # %continue ; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.br.i64: @@ -2956,9 +3140,11 @@ ; RV64-NEXT: beq a1, a0, .LBB51_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB51_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.br.i64: @@ -2974,9 +3160,11 @@ ; RV32ZBA-NEXT: bgez a0, .LBB51_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB51_2: # %continue ; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.br.i64: @@ -3011,9 +3199,11 @@ ; RV32-NEXT: bgeu a1, a0, .LBB52_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: mv a0, zero +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB52_2: # %continue ; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.br.i32: @@ -3023,9 +3213,11 @@ ; RV64-NEXT: bgeu a1, a0, .LBB52_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB52_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.br.i32: @@ -3034,9 +3226,11 @@ ; RV32ZBA-NEXT: bgeu a1, a0, .LBB52_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB52_2: # %continue ; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.br.i32: @@ -3077,9 +3271,11 @@ ; RV32-NEXT: beqz a0, .LBB53_4 ; RV32-NEXT: # %bb.3: # %overflow ; RV32-NEXT: mv a0, zero +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB53_4: # %continue ; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.br.i64: @@ -3088,9 +3284,11 @@ ; RV64-NEXT: bgeu a1, a0, .LBB53_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB53_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.br.i64: @@ -3106,9 +3304,11 @@ ; RV32ZBA-NEXT: beqz a0, .LBB53_4 ; RV32ZBA-NEXT: # %bb.3: # %overflow ; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB53_4: # %continue ; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.br.i64: @@ -3143,9 +3343,11 @@ ; RV32-NEXT: beq a2, a0, .LBB54_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: mv a0, zero +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB54_2: # %continue ; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.br.i32: @@ -3157,9 +3359,11 @@ ; RV64-NEXT: beq a0, a2, .LBB54_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; 
RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB54_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.br.i32: @@ -3170,9 +3374,11 @@ ; RV32ZBA-NEXT: beq a2, a0, .LBB54_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB54_2: # %continue ; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.br.i32: @@ -3213,9 +3419,11 @@ ; RV32-NEXT: bgez a0, .LBB55_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: mv a0, zero +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB55_2: # %continue ; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.br.i64: @@ -3226,9 +3434,11 @@ ; RV64-NEXT: beq a2, a0, .LBB55_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB55_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.br.i64: @@ -3242,9 +3452,11 @@ ; RV32ZBA-NEXT: bgez a0, .LBB55_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB55_2: # %continue ; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.br.i64: @@ -3279,9 +3491,11 @@ ; RV32-NEXT: bgeu a0, a1, .LBB56_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: mv a0, zero +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB56_2: # %continue ; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.br.i32: @@ -3291,9 +3505,11 @@ ; RV64-NEXT: bgeu a0, a1, .LBB56_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB56_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.br.i32: @@ -3302,9 +3518,11 @@ ; RV32ZBA-NEXT: bgeu a0, a1, .LBB56_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB56_2: # %continue ; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.br.i32: @@ -3343,6 +3561,7 @@ ; RV32-NEXT: bnez a0, .LBB57_4 ; RV32-NEXT: .LBB57_2: # %continue ; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB57_3: ; RV32-NEXT: sub a1, a0, a2 @@ -3350,6 +3569,7 @@ ; RV32-NEXT: beqz a0, .LBB57_2 ; RV32-NEXT: .LBB57_4: # %overflow ; RV32-NEXT: mv a0, zero +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.br.i64: @@ -3358,9 +3578,11 @@ ; RV64-NEXT: bgeu a0, a1, .LBB57_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB57_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.br.i64: @@ -3374,6 +3596,7 @@ ; RV32ZBA-NEXT: bnez a0, .LBB57_4 ; RV32ZBA-NEXT: .LBB57_2: # %continue ; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB57_3: ; RV32ZBA-NEXT: sub a1, a0, a2 @@ -3381,6 +3604,7 @@ ; RV32ZBA-NEXT: beqz a0, .LBB57_2 ; 
RV32ZBA-NEXT: .LBB57_4: # %overflow ; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.br.i64: @@ -3415,9 +3639,11 @@ ; RV32-NEXT: beq a2, a0, .LBB58_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: mv a0, zero +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB58_2: # %continue ; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.br.i32: @@ -3429,9 +3655,11 @@ ; RV64-NEXT: beq a0, a2, .LBB58_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB58_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.br.i32: @@ -3442,9 +3670,11 @@ ; RV32ZBA-NEXT: beq a2, a0, .LBB58_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB58_2: # %continue ; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.br.i32: @@ -3537,7 +3767,11 @@ ; RV32-NEXT: lw s2, 4(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore s0 +; RV32-NEXT: .cfi_restore s1 +; RV32-NEXT: .cfi_restore s2 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.br.i64: @@ -3548,9 +3782,11 @@ ; RV64-NEXT: beq a2, a0, .LBB59_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB59_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.br.i64: @@ -3616,7 +3852,13 @@ ; RV32ZBA-NEXT: lw s2, 4(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: lw s0, 12(sp) # 4-byte Folded Reload +; RV32ZBA-NEXT: .cfi_restore s0 +; RV32ZBA-NEXT: .cfi_restore s1 +; RV32ZBA-NEXT: .cfi_restore s2 +; RV32ZBA-NEXT: .cfi_restore s3 ; RV32ZBA-NEXT: addi sp, sp, 16 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.br.i64: @@ -3688,9 +3930,11 @@ ; RV32-NEXT: beqz a0, .LBB60_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: mv a0, zero +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB60_2: # %continue ; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo2.br.i64: @@ -3702,9 +3946,11 @@ ; RV64-NEXT: beq a2, a0, .LBB60_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB60_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo2.br.i64: @@ -3750,9 +3996,11 @@ ; RV32ZBA-NEXT: beqz a0, .LBB60_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB60_2: # %continue ; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo2.br.i64: @@ -3788,9 +4036,11 @@ ; RV32-NEXT: beqz a0, .LBB61_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: mv a0, zero +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB61_2: # %continue ; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ;
RV32-NEXT: ret ; ; RV64-LABEL: umulo.br.i32: @@ -3802,9 +4052,11 @@ ; RV64-NEXT: beqz a0, .LBB61_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB61_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.br.i32: @@ -3813,9 +4065,11 @@ ; RV32ZBA-NEXT: beqz a0, .LBB61_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB61_2: # %continue ; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.br.i32: @@ -3866,9 +4120,11 @@ ; RV32-NEXT: beqz a0, .LBB62_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: mv a0, zero +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB62_2: # %continue ; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.br.i64: @@ -3877,9 +4133,11 @@ ; RV64-NEXT: beqz a0, .LBB62_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB62_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.br.i64: @@ -3903,9 +4161,11 @@ ; RV32ZBA-NEXT: beqz a0, .LBB62_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB62_2: # %continue ; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.br.i64: @@ -3945,9 +4205,11 @@ ; RV32-NEXT: beqz a0, .LBB63_4 ; RV32-NEXT: # %bb.3: # %overflow ; RV32-NEXT: mv a0, zero +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB63_4: # %continue ; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo2.br.i64: @@ -3956,9 +4218,11 @@ ; RV64-NEXT: bgeu a1, a0, .LBB63_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB63_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo2.br.i64: @@ -3974,9 +4238,11 @@ ; RV32ZBA-NEXT: beqz a0, .LBB63_4 ; RV32ZBA-NEXT: # %bb.3: # %overflow ; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB63_4: # %continue ; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo2.br.i64: